Diffstat (limited to 'arch/sh/include/cpu-sh4/cpu/dma-register.h')
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/dma-register.h | 102
1 file changed, 102 insertions, 0 deletions
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h
new file mode 100644
index 00000000000..02788b6a03b
--- /dev/null
+++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h
@@ -0,0 +1,102 @@
+/*
+ * SH4 CPU-specific DMA definitions, used by both DMA drivers
+ *
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef CPU_DMA_REGISTER_H
+#define CPU_DMA_REGISTER_H
+
+/* SH7751/7760/7780 DMA IRQ sources */
+
+#ifdef CONFIG_CPU_SH4A
+
+#define DMAOR_INIT DMAOR_DME
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7343)
+#define CHCR_TS_LOW_MASK 0x00000018
+#define CHCR_TS_LOW_SHIFT 3
+#define CHCR_TS_HIGH_MASK 0
+#define CHCR_TS_HIGH_SHIFT 0
+#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7723) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7724) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7730) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
+#define CHCR_TS_LOW_MASK 0x00000018
+#define CHCR_TS_LOW_SHIFT 3
+#define CHCR_TS_HIGH_MASK 0x00300000
+#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7785)
+#define CHCR_TS_LOW_MASK 0x00000018
+#define CHCR_TS_LOW_SHIFT 3
+#define CHCR_TS_HIGH_MASK 0x00100000
+#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */
+#endif
+
+/* Transmit sizes and respective CHCR register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_64BIT = 7,
+ XMIT_SZ_128BIT = 3,
+ XMIT_SZ_256BIT = 4,
+ XMIT_SZ_128BIT_BLK = 0xb,
+ XMIT_SZ_256BIT_BLK = 0xc,
+};
+
+/* log2(size / 8) - used to calculate number of transfers */
+#define TS_SHIFT { \
+ [XMIT_SZ_8BIT] = 0, \
+ [XMIT_SZ_16BIT] = 1, \
+ [XMIT_SZ_32BIT] = 2, \
+ [XMIT_SZ_64BIT] = 3, \
+ [XMIT_SZ_128BIT] = 4, \
+ [XMIT_SZ_256BIT] = 5, \
+ [XMIT_SZ_128BIT_BLK] = 4, \
+ [XMIT_SZ_256BIT_BLK] = 5, \
+}
+
+#define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
+ (((i) & 0xc) << CHCR_TS_HIGH_SHIFT))
+
+#else /* CONFIG_CPU_SH4A */
+
+#define DMAOR_INIT (0x8000 | DMAOR_DME)
+
+#define CHCR_TS_LOW_MASK 0x70
+#define CHCR_TS_LOW_SHIFT 4
+#define CHCR_TS_HIGH_MASK 0
+#define CHCR_TS_HIGH_SHIFT 0
+
+/* Transmit sizes and respective CHCR register values */
+enum {
+ XMIT_SZ_8BIT = 1,
+ XMIT_SZ_16BIT = 2,
+ XMIT_SZ_32BIT = 3,
+ XMIT_SZ_64BIT = 0,
+ XMIT_SZ_256BIT = 4,
+};
+
+/* log2(size / 8) - used to calculate number of transfers */
+#define TS_SHIFT { \
+ [XMIT_SZ_8BIT] = 0, \
+ [XMIT_SZ_16BIT] = 1, \
+ [XMIT_SZ_32BIT] = 2, \
+ [XMIT_SZ_64BIT] = 3, \
+ [XMIT_SZ_256BIT] = 5, \
+}
+
+#define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT)
+
+#endif /* CONFIG_CPU_SH4A */
+
+#endif
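
For reference, the TS_SHIFT table and TS_INDEX2VAL() macro in the header above are meant to be consumed by the SH DMA setup code: the macro folds an XMIT_SZ_* index into the (possibly split) low/high transfer-size bits of CHCR, and the table gives log2 of the transfer width in bytes. A minimal, illustrative sketch of that use follows; example_ts_setup() is a hypothetical helper, not part of the header, and it assumes the definitions above plus pr_debug():

static const unsigned int ts_shift[] = TS_SHIFT;

/* Hypothetical helper: insert the transfer-size index into CHCR and
 * report how many transfers a DMA of 'len' bytes would need.
 */
static void example_ts_setup(u32 *chcr, unsigned int xmit_sz, size_t len)
{
	/* drop the old TS bits, then merge in the new index */
	*chcr &= ~(CHCR_TS_LOW_MASK | CHCR_TS_HIGH_MASK);
	*chcr |= TS_INDEX2VAL(xmit_sz);

	/* each transfer moves (1 << ts_shift[xmit_sz]) bytes */
	pr_debug("need %zu transfers\n", len >> ts_shift[xmit_sz]);
}

For XMIT_SZ_32BIT on an SH4A part, for example, this writes 2 into the low TS field (bits 3-4 of CHCR), and a 64-byte transfer comes out as 16 individual 4-byte transfers.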
-rw-r--r--  drivers/usb/host/ehci-octeon.c | 188
-rw-r--r--  drivers/usb/host/ehci-omap.c | 327
-rw-r--r--  drivers/usb/host/ehci-orion.c | 254
-rw-r--r--  drivers/usb/host/ehci-pci.c | 410
-rw-r--r--  drivers/usb/host/ehci-platform.c | 410
-rw-r--r--  drivers/usb/host/ehci-pmcmsp.c | 330
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c | 119
-rw-r--r--  drivers/usb/host/ehci-ps3.c | 64
-rw-r--r--  drivers/usb/host/ehci-q.c | 957
-rw-r--r--  drivers/usb/host/ehci-sched.c | 1828
-rw-r--r--  drivers/usb/host/ehci-sead3.c | 187
-rw-r--r--  drivers/usb/host/ehci-sh.c | 206
-rw-r--r--  drivers/usb/host/ehci-spear.c | 195
-rw-r--r--  drivers/usb/host/ehci-sysfs.c | 187
-rw-r--r--  drivers/usb/host/ehci-tegra.c | 569
-rw-r--r--  drivers/usb/host/ehci-tilegx.c | 217
-rw-r--r--  drivers/usb/host/ehci-timer.c | 433
-rw-r--r--  drivers/usb/host/ehci-w90x900.c | 154
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 241
-rw-r--r--  drivers/usb/host/ehci.h | 391
-rw-r--r--  drivers/usb/host/fhci-dbg.c | 16
-rw-r--r--  drivers/usb/host/fhci-hcd.c | 101
-rw-r--r--  drivers/usb/host/fhci-hub.c | 18
-rw-r--r--  drivers/usb/host/fhci-mem.c | 3
-rw-r--r--  drivers/usb/host/fhci-q.c | 3
-rw-r--r--  drivers/usb/host/fhci-sched.c | 84
-rw-r--r--  drivers/usb/host/fhci-tds.c | 72
-rw-r--r--  drivers/usb/host/fhci.h | 53
-rw-r--r--  drivers/usb/host/fotg210-hcd.c | 5981
-rw-r--r--  drivers/usb/host/fotg210.h | 742
-rw-r--r--  drivers/usb/host/fsl-mph-dr-of.c | 340
-rw-r--r--  drivers/usb/host/fusbh200-hcd.c | 5894
-rw-r--r--  drivers/usb/host/fusbh200.h | 731
-rw-r--r--  drivers/usb/host/hwa-hc.c | 161
-rw-r--r--  drivers/usb/host/imx21-dbg.c | 531
-rw-r--r--  drivers/usb/host/imx21-hcd.c | 1947
-rw-r--r--  drivers/usb/host/imx21-hcd.h | 444
-rw-r--r--  drivers/usb/host/isp116x-hcd.c | 53
-rw-r--r--  drivers/usb/host/isp116x.h | 23
-rw-r--r--  drivers/usb/host/isp1362-hcd.c | 2838
-rw-r--r--  drivers/usb/host/isp1362.h | 1014
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 2544
-rw-r--r--  drivers/usb/host/isp1760-hcd.h | 144
-rw-r--r--  drivers/usb/host/isp1760-if.c | 262
-rw-r--r--  drivers/usb/host/max3421-hcd.c | 1957
-rw-r--r--  drivers/usb/host/octeon2-common.c | 200
-rw-r--r--  drivers/usb/host/ohci-at91.c | 588
-rw-r--r--  drivers/usb/host/ohci-au1xxx.c | 318
-rw-r--r--  drivers/usb/host/ohci-da8xx.c | 442
-rw-r--r--  drivers/usb/host/ohci-dbg.c | 139
-rw-r--r--  drivers/usb/host/ohci-ep93xx.c | 214
-rw-r--r--  drivers/usb/host/ohci-exynos.c | 363
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 494
-rw-r--r--  drivers/usb/host/ohci-hub.c | 128
-rw-r--r--  drivers/usb/host/ohci-jz4740.c | 253
-rw-r--r--  drivers/usb/host/ohci-lh7a404.c | 253
-rw-r--r--  drivers/usb/host/ohci-nxp.c | 351
-rw-r--r--  drivers/usb/host/ohci-octeon.c | 202
-rw-r--r--  drivers/usb/host/ohci-omap.c | 225
-rw-r--r--  drivers/usb/host/ohci-omap3.c | 211
-rw-r--r--  drivers/usb/host/ohci-pci.c | 311
-rw-r--r--  drivers/usb/host/ohci-platform.c | 395
-rw-r--r--  drivers/usb/host/ohci-pnx4008.c | 453
-rw-r--r--  drivers/usb/host/ohci-pnx8550.c | 242
-rw-r--r--  drivers/usb/host/ohci-ppc-of.c | 83
-rw-r--r--  drivers/usb/host/ohci-ppc-soc.c | 215
-rw-r--r--  drivers/usb/host/ohci-ps3.c | 19
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 457
-rw-r--r--  drivers/usb/host/ohci-q.c | 95
-rw-r--r--  drivers/usb/host/ohci-s3c2410.c | 244
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 309
-rw-r--r--  drivers/usb/host/ohci-sh.c | 143
-rw-r--r--  drivers/usb/host/ohci-sm501.c | 33
-rw-r--r--  drivers/usb/host/ohci-spear.c | 209
-rw-r--r--  drivers/usb/host/ohci-ssb.c | 214
-rw-r--r--  drivers/usb/host/ohci-tilegx.c | 206
-rw-r--r--  drivers/usb/host/ohci-tmio.c | 37
-rw-r--r--  drivers/usb/host/ohci.h | 80
-rw-r--r--  drivers/usb/host/oxu210hp-hcd.c | 147
-rw-r--r--  drivers/usb/host/oxu210hp.h | 8
-rw-r--r--  drivers/usb/host/pci-quirks.c | 914
-rw-r--r--  drivers/usb/host/pci-quirks.h | 19
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 564
-rw-r--r--  drivers/usb/host/r8a66597.h | 519
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 244
-rw-r--r--  drivers/usb/host/sl811.h | 21
-rw-r--r--  drivers/usb/host/sl811_cs.c | 149
-rw-r--r--  drivers/usb/host/ssb-hcd.c | 278
-rw-r--r--  drivers/usb/host/u132-hcd.c | 55
-rw-r--r--  drivers/usb/host/uhci-debug.c | 293
-rw-r--r--  drivers/usb/host/uhci-grlib.c | 207
-rw-r--r--  drivers/usb/host/uhci-hcd.c | 685
-rw-r--r--  drivers/usb/host/uhci-hcd.h | 259
-rw-r--r--  drivers/usb/host/uhci-hub.c | 102
-rw-r--r--  drivers/usb/host/uhci-pci.c | 306
-rw-r--r--  drivers/usb/host/uhci-platform.c | 165
-rw-r--r--  drivers/usb/host/uhci-q.c | 277
-rw-r--r--  drivers/usb/host/whci/Kbuild | 2
-rw-r--r--  drivers/usb/host/whci/asl.c | 55
-rw-r--r--  drivers/usb/host/whci/debug.c | 30
-rw-r--r--  drivers/usb/host/whci/hcd.c | 48
-rw-r--r--  drivers/usb/host/whci/init.c | 3
-rw-r--r--  drivers/usb/host/whci/int.c | 1
-rw-r--r--  drivers/usb/host/whci/pzl.c | 57
-rw-r--r--  drivers/usb/host/whci/qset.c | 384
-rw-r--r--  drivers/usb/host/whci/whcd.h | 10
-rw-r--r--  drivers/usb/host/whci/whci-hc.h | 16
-rw-r--r--  drivers/usb/host/whci/wusb.c | 1
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 596
-rw-r--r--  drivers/usb/host/xhci-ext-caps.h | 156
-rw-r--r--  drivers/usb/host/xhci-hub.c | 1252
-rw-r--r--  drivers/usb/host/xhci-mem.c | 2550
-rw-r--r--  drivers/usb/host/xhci-mvebu.c | 72
-rw-r--r--  drivers/usb/host/xhci-mvebu.h | 21
-rw-r--r--  drivers/usb/host/xhci-pci.c | 415
-rw-r--r--  drivers/usb/host/xhci-plat.c | 297
-rw-r--r--  drivers/usb/host/xhci-ring.c | 3996
-rw-r--r--  drivers/usb/host/xhci-trace.c | 15
-rw-r--r--  drivers/usb/host/xhci-trace.h | 151
-rw-r--r--  drivers/usb/host/xhci.c | 4923
-rw-r--r--  drivers/usb/host/xhci.h | 1891
138 files changed, 59490 insertions, 10269 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2c63bfb1f8d..03314f861be 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -2,11 +2,9 @@
# USB Host Controller Drivers
#
comment "USB Host Controller Drivers"
- depends on USB
config USB_C67X00_HCD
tristate "Cypress C67x00 HCD support"
- depends on USB
help
The Cypress C67x00 (EZ-Host/EZ-OTG) chips are dual-role
host/peripheral/OTG USB controllers.
@@ -17,17 +15,37 @@ config USB_C67X00_HCD
To compile this driver as a module, choose M here: the
module will be called c67x00.
+config USB_XHCI_HCD
+ tristate "xHCI HCD (USB 3.0) support"
+ ---help---
+ The eXtensible Host Controller Interface (xHCI) is standard for USB 3.0
+ "SuperSpeed" host controller hardware.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xhci-hcd.
+
+if USB_XHCI_HCD
+
+config USB_XHCI_PLATFORM
+ tristate
+
+config USB_XHCI_MVEBU
+ tristate "xHCI support for Marvell Armada 375/38x"
+ select USB_XHCI_PLATFORM
+ depends on ARCH_MVEBU || COMPILE_TEST
+ ---help---
+ Say 'Y' to enable the support for the xHCI host controller
+ found in Marvell Armada 375/38x ARM SOCs.
+
+endif # USB_XHCI_HCD
+
config USB_EHCI_HCD
tristate "EHCI HCD (USB 2.0) support"
- depends on USB && USB_ARCH_HAS_EHCI
---help---
The Enhanced Host Controller Interface (EHCI) is standard for USB 2.0
"high speed" (480 Mbit/sec, 60 Mbyte/sec) host controller hardware.
If your USB host controller supports USB 2.0, you will likely want to
- configure this Host Controller Driver. At the time of this writing,
- the primary implementation of EHCI is a chip from NEC, widely available
- in add-on PCI cards, but implementations are in the works from other
- vendors including Intel and Philips. Motherboard support is appearing.
+ configure this Host Controller Driver.
EHCI controllers are packaged with "companion" host controllers (OHCI
or UHCI) to handle USB 1.1 devices connected to root hub ports. Ports
@@ -55,8 +73,9 @@ config USB_EHCI_ROOT_HUB_TT
from ARC, and has since changed hands a few times.
config USB_EHCI_TT_NEWSCHED
- bool "Improved Transaction Translator scheduling (EXPERIMENTAL)"
- depends on USB_EHCI_HCD && EXPERIMENTAL
+ bool "Improved Transaction Translator scheduling"
+ depends on USB_EHCI_HCD
+ default y
---help---
This changes the periodic scheduling code to fill more of the low
and full speed bandwidth available from the Transaction Translator
@@ -67,38 +86,204 @@ config USB_EHCI_TT_NEWSCHED
If you have multiple periodic low/fullspeed devices connected to a
highspeed USB hub which is connected to a highspeed USB Host
Controller, and some of those devices will not work correctly
- (possibly due to "ENOSPC" or "-28" errors), say Y.
+ (possibly due to "ENOSPC" or "-28" errors), say Y. Conversely, if
+ you have only one such device and it doesn't work, you could try
+ saying N.
- If unsure, say N.
+ If unsure, say Y.
-config USB_EHCI_BIG_ENDIAN_MMIO
- bool
- depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX)
- default y
+config USB_FSL_MPH_DR_OF
+ tristate
-config USB_EHCI_BIG_ENDIAN_DESC
- bool
- depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX)
+if USB_EHCI_HCD
+
+config USB_EHCI_PCI
+ tristate
+ depends on PCI
default y
+config USB_EHCI_HCD_PMC_MSP
+ tristate "EHCI support for on-chip PMC MSP71xx USB controller"
+ depends on MSP_HAS_USB
+ default n
+ select USB_EHCI_BIG_ENDIAN_DESC
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ ---help---
+ Enables support for the onchip USB controller on the PMC_MSP7100 Family SoC's.
+ If unsure, say N.
+
+config XPS_USB_HCD_XILINX
+ bool "Use Xilinx usb host EHCI controller core"
+ depends on (PPC32 || MICROBLAZE)
+ select USB_EHCI_BIG_ENDIAN_DESC
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ ---help---
+ Xilinx xps USB host controller core is EHCI compilant and has
+ transaction translator built-in. It can be configured to either
+ support both high speed and full speed devices, or high speed
+ devices only.
+
config USB_EHCI_FSL
- bool "Support for Freescale on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && FSL_SOC
+ bool "Support for Freescale PPC on-chip EHCI USB controller"
+ depends on FSL_SOC
+ select USB_EHCI_ROOT_HUB_TT
+ select USB_FSL_MPH_DR_OF if OF
+ ---help---
+ Variation of ARC USB block used in some Freescale chips.
+
+config USB_EHCI_MXC
+ tristate "Support for Freescale i.MX on-chip EHCI USB controller"
+ depends on ARCH_MXC
select USB_EHCI_ROOT_HUB_TT
---help---
Variation of ARC USB block used in some Freescale chips.
+config USB_EHCI_HCD_OMAP
+ tristate "EHCI support for OMAP3 and later chips"
+ depends on ARCH_OMAP
+ select NOP_USB_XCEIV
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ OMAP3 and later chips.
+
+config USB_EHCI_HCD_ORION
+ tristate "Support for Marvell EBU on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && PLAT_ORION
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on Marvell's
+ embedded ARM SoCs, including Orion, Kirkwood, Dove, Armada XP,
+ Armada 370. This is different from the EHCI implementation
+ on Marvell's mobile PXA and MMP SoC, see "EHCI support for
+ Marvell PXA/MMP USB controller" for those.
+
+config USB_EHCI_HCD_SPEAR
+ tristate "Support for ST SPEAr on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ ST SPEAr chips.
+
+config USB_EHCI_HCD_AT91
+ tristate "Support for Atmel on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && ARCH_AT91
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ Atmel chips.
+
+config USB_EHCI_MSM
+ tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
+ depends on ARCH_MSM || ARCH_QCOM
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Enables support for the USB Host controller present on the
+ Qualcomm chipsets. Root Hub has inbuilt TT.
+ This driver depends on OTG driver for PHY initialization,
+ clock management, powering up VBUS, and power management.
+ This driver is not supported on boards like trout which
+ has an external PHY.
+
+config USB_EHCI_TEGRA
+ tristate "NVIDIA Tegra HCD support"
+ depends on ARCH_TEGRA
+ select USB_EHCI_ROOT_HUB_TT
+ select USB_PHY
+ help
+ This driver enables support for the internal USB Host Controllers
+ found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
+
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
- depends on USB_EHCI_HCD && PPC_OF
+ depends on PPC_OF
default y
---help---
Enables support for the USB controller present on the PowerPC
OpenFirmware platform bus.
+config USB_EHCI_SH
+ bool "EHCI support for SuperH USB controller"
+ depends on SUPERH
+ ---help---
+ Enables support for the on-chip EHCI controller on the SuperH.
+ If you use the PCI EHCI controller, this option is not necessary.
+
+config USB_EHCI_EXYNOS
+ tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
+ depends on PLAT_S5P || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
+
+config USB_EHCI_MV
+ bool "EHCI support for Marvell PXA/MMP USB controller"
+ depends on (ARCH_PXA || ARCH_MMP)
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Enables support for Marvell (including PXA and MMP series) on-chip
+ USB SPH and OTG controller. SPH is a single port host, and it can
+ only be EHCI host. OTG is controller that can switch to host mode.
+ Note that this driver will not work on Marvell's other EHCI
+ controller used by the EBU-type SoCs including Orion, Kirkwood,
+ Dova, Armada 370 and Armada XP. See "Support for Marvell EBU
+ on-chip EHCI USB controller" for those.
+
+config USB_W90X900_EHCI
+ tristate "W90X900(W90P910) EHCI support"
+ depends on ARCH_W90X900
+ ---help---
+ Enables support for the W90X900 USB controller
+
+config USB_CNS3XXX_EHCI
+ bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
+ depends on ARCH_CNS3XXX
+ select USB_EHCI_HCD_PLATFORM
+ ---help---
+ This option is deprecated now and the driver was removed, use
+ USB_EHCI_HCD_PLATFORM instead.
+
+ Enable support for the CNS3XXX SOC's on-chip EHCI controller.
+ It is needed for high-speed (480Mbit/sec) USB 2.0 device
+ support.
+
+config USB_EHCI_ATH79
+ bool "EHCI support for AR7XXX/AR9XXX SoCs (DEPRECATED)"
+ depends on (SOC_AR71XX || SOC_AR724X || SOC_AR913X || SOC_AR933X)
+ select USB_EHCI_ROOT_HUB_TT
+ select USB_EHCI_HCD_PLATFORM
+ default y
+ ---help---
+ This option is deprecated now and the driver was removed, use
+ USB_EHCI_HCD_PLATFORM instead.
+
+ Enables support for the built-in EHCI controller present
+ on the Atheros AR7XXX/AR9XXX SoCs.
+
+config USB_EHCI_HCD_PLATFORM
+ tristate "Generic EHCI driver for a platform device"
+ default n
+ ---help---
+ Adds an EHCI host driver for a generic platform device, which
+ provides a memory space and an irq.
+
+ If unsure, say N.
+
+config USB_OCTEON_EHCI
+ bool "Octeon on-chip EHCI support"
+ depends on CAVIUM_OCTEON_SOC
+ default n
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ help
+ Enable support for the Octeon II SOC's on-chip EHCI
+ controller. It is needed for high-speed (480Mbit/sec)
+ USB 2.0 device support. All CN6XXX based chips with USB are
+ supported.
+
+endif # USB_EHCI_HCD
+
config USB_OXU210HP_HCD
tristate "OXU210HP HCD support"
- depends on USB
---help---
The OXU210HP is an USB host/OTG/device controller. Enable this
option if your board has this chip. If unsure, say N.
@@ -111,7 +296,6 @@ config USB_OXU210HP_HCD
config USB_ISP116X_HCD
tristate "ISP116X HCD support"
- depends on USB
---help---
The ISP1160 and ISP1161 chips are USB host controllers. Enable this
option if your board has this chip. If unsure, say N.
@@ -123,7 +307,6 @@ config USB_ISP116X_HCD
config USB_ISP1760_HCD
tristate "ISP 1760 HCD support"
- depends on USB && EXPERIMENTAL && (PCI || PPC_OF)
---help---
The ISP1760 chip is a USB 2.0 host controller.
@@ -136,11 +319,50 @@ config USB_ISP1760_HCD
To compile this driver as a module, choose M here: the
module will be called isp1760.
+config USB_ISP1362_HCD
+ tristate "ISP1362 HCD support"
+ ---help---
+ Supports the Philips ISP1362 chip as a host controller
+
+ This driver does not support isochronous transfers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called isp1362-hcd.
+
+config USB_FUSBH200_HCD
+ tristate "FUSBH200 HCD support"
+ depends on USB
+ ---help---
+ Faraday FUSBH200 is designed to meet USB2.0 EHCI specification
+ with minor modification.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fusbh200-hcd.
+
+config USB_FOTG210_HCD
+ tristate "FOTG210 HCD support"
+ depends on USB
+ ---help---
+ Faraday FOTG210 is an OTG controller which can be configured as
+ an USB2.0 host. It is designed to meet USB2.0 EHCI specification
+ with minor modification.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fotg210-hcd.
+
+config USB_MAX3421_HCD
+ tristate "MAX3421 HCD (USB-over-SPI) support"
+ depends on USB && SPI
+ ---help---
+ The Maxim MAX3421E chip supports standard USB 2.0-compliant
+ full-speed devices either in host or peripheral mode. This
+ driver supports the host-mode of the MAX3421E only.
+
+ To compile this driver as a module, choose M here: the module will
+ be called max3421-hcd.
+
config USB_OHCI_HCD
- tristate "OHCI HCD support"
- depends on USB && USB_ARCH_HAS_OHCI
- select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
- select USB_OTG_UTILS if ARCH_OMAP
+ tristate "OHCI HCD (USB 1.1) support"
---help---
The Open Host Controller Interface (OHCI) is a standard for accessing
USB 1.1 host controller hardware. It does more in hardware than Intel's
@@ -154,40 +376,112 @@ config USB_OHCI_HCD
To compile this driver as a module, choose M here: the
module will be called ohci-hcd.
-config USB_OHCI_HCD_PPC_SOC
- bool "OHCI support for on-chip PPC USB controller"
- depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx)
+if USB_OHCI_HCD
+
+config USB_OHCI_HCD_OMAP1
+ tristate "OHCI support for OMAP1/2 chips"
+ depends on ARCH_OMAP1
+ depends on ISP1301_OMAP || !(MACH_OMAP_H2 || MACH_OMAP_H3)
+ default y
+ ---help---
+ Enables support for the OHCI controller on OMAP1/2 chips.
+
+config USB_OHCI_HCD_SPEAR
+ tristate "Support for ST SPEAr on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ ST SPEAr chips.
+
+config USB_OHCI_HCD_S3C2410
+ tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ S3C24xx/S3C64xx chips.
+
+config USB_OHCI_HCD_LPC32XX
+ tristate "Support for LPC on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && ARCH_LPC32XX
+ depends on USB_ISP1301
default y
- select USB_OHCI_BIG_ENDIAN_DESC
- select USB_OHCI_BIG_ENDIAN_MMIO
---help---
- Enables support for the USB controller on the MPC52xx or
- STB03xxx processor chip. If unsure, say Y.
+ Enables support for the on-chip OHCI controller on
+ NXP chips.
-config USB_OHCI_HCD_PPC_OF
- bool "OHCI support for PPC USB controller on OF platform bus"
- depends on USB_OHCI_HCD && PPC_OF
+config USB_OHCI_HCD_PXA27X
+ tristate "Support for PXA27X/PXA3XX on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && (PXA27x || PXA3xx)
default y
---help---
- Enables support for the USB controller PowerPC present on the
- OpenFirmware platform bus.
+ Enables support for the on-chip OHCI controller on
+ PXA27x/PXA3xx chips.
+
+config USB_OHCI_HCD_AT91
+ tristate "Support for Atmel on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && ARCH_AT91
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ Atmel chips.
+
+config USB_OHCI_HCD_OMAP3
+ tristate "OHCI support for OMAP3 and later chips"
+ depends on (ARCH_OMAP3 || ARCH_OMAP4)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ OMAP3 and later chips.
-config USB_OHCI_HCD_PPC_OF_BE
- bool "Support big endian HC"
- depends on USB_OHCI_HCD_PPC_OF
+config USB_OHCI_HCD_DAVINCI
+ bool "OHCI support for TI DaVinci DA8xx"
+ depends on ARCH_DAVINCI_DA8XX
+ depends on USB_OHCI_HCD=y
+ default y
+ help
+ Enables support for the DaVinci DA8xx integrated OHCI
+ controller. This driver cannot currently be a loadable
+ module because it lacks a proper PHY abstraction.
+
+config USB_OHCI_ATH79
+ bool "USB OHCI support for the Atheros AR71XX/AR7240 SoCs (DEPRECATED)"
+ depends on (SOC_AR71XX || SOC_AR724X)
+ select USB_OHCI_HCD_PLATFORM
default y
+ help
+ This option is deprecated now and the driver was removed, use
+ USB_OHCI_HCD_PLATFORM instead.
+
+ Enables support for the built-in OHCI controller present on the
+ Atheros AR71XX/AR7240 SoCs.
+
+config USB_OHCI_HCD_PPC_OF_BE
+ bool "OHCI support for OF platform bus (big endian)"
+ depends on PPC_OF
select USB_OHCI_BIG_ENDIAN_DESC
select USB_OHCI_BIG_ENDIAN_MMIO
+ ---help---
+ Enables support for big-endian USB controllers present on the
+ OpenFirmware platform bus.
config USB_OHCI_HCD_PPC_OF_LE
- bool "Support little endian HC"
- depends on USB_OHCI_HCD_PPC_OF
- default n
+ bool "OHCI support for OF platform bus (little endian)"
+ depends on PPC_OF
select USB_OHCI_LITTLE_ENDIAN
+ ---help---
+ Enables support for little-endian USB controllers present on the
+ OpenFirmware platform bus.
+
+config USB_OHCI_HCD_PPC_OF
+ bool
+ depends on PPC_OF
+ default USB_OHCI_HCD_PPC_OF_BE || USB_OHCI_HCD_PPC_OF_LE
config USB_OHCI_HCD_PCI
- bool "OHCI support for PCI-bus USB controllers"
- depends on USB_OHCI_HCD && PCI && (STB03xxx || PPC_MPC52xx || USB_OHCI_HCD_PPC_OF)
+ tristate "OHCI support for PCI-bus USB controllers"
+ depends on PCI
default y
select USB_OHCI_LITTLE_ENDIAN
---help---
@@ -195,10 +489,15 @@ config USB_OHCI_HCD_PCI
If unsure, say Y.
config USB_OHCI_HCD_SSB
- bool "OHCI support for Broadcom SSB OHCI core"
- depends on USB_OHCI_HCD && (SSB = y || SSB = USB_OHCI_HCD) && EXPERIMENTAL
+ bool "OHCI support for Broadcom SSB OHCI core (DEPRECATED)"
+ depends on (SSB = y || SSB = USB_OHCI_HCD)
+ select USB_HCD_SSB
+ select USB_OHCI_HCD_PLATFORM
default n
---help---
+ This option is deprecated now and the driver was removed, use
+ USB_HCD_SSB and USB_OHCI_HCD_PLATFORM instead.
+
Support for the Sonics Silicon Backplane (SSB) attached
Broadcom USB OHCI core.
@@ -207,25 +506,59 @@ config USB_OHCI_HCD_SSB
If unsure, say N.
-config USB_OHCI_BIG_ENDIAN_DESC
- bool
- depends on USB_OHCI_HCD
- default n
+config USB_OHCI_SH
+ bool "OHCI support for SuperH USB controller (DEPRECATED)"
+ depends on SUPERH
+ select USB_OHCI_HCD_PLATFORM
+ ---help---
+ This option is deprecated now and the driver was removed, use
+ USB_OHCI_HCD_PLATFORM instead.
-config USB_OHCI_BIG_ENDIAN_MMIO
- bool
- depends on USB_OHCI_HCD
+ Enables support for the on-chip OHCI controller on the SuperH.
+ If you use the PCI OHCI controller, this option is not necessary.
+
+config USB_OHCI_EXYNOS
+ tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
+ depends on PLAT_S5P || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+
+config USB_CNS3XXX_OHCI
+ bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
+ depends on ARCH_CNS3XXX
+ select USB_OHCI_HCD_PLATFORM
+ ---help---
+ This option is deprecated now and the driver was removed, use
+ USB_OHCI_HCD_PLATFORM instead.
+
+ Enable support for the CNS3XXX SOC's on-chip OHCI controller.
+ It is needed for low-speed USB 1.0 device support.
+
+config USB_OHCI_HCD_PLATFORM
+ tristate "Generic OHCI driver for a platform device"
default n
+ ---help---
+ Adds an OHCI host driver for a generic platform device, which
+ provides a memory space and an irq.
-config USB_OHCI_LITTLE_ENDIAN
- bool
- depends on USB_OHCI_HCD
- default n if STB03xxx || PPC_MPC52xx
- default y
+ If unsure, say N.
+
+config USB_OCTEON_OHCI
+ bool "Octeon on-chip OHCI support"
+ depends on CAVIUM_OCTEON_SOC
+ default USB_OCTEON_EHCI
+ select USB_OHCI_BIG_ENDIAN_MMIO
+ select USB_OHCI_LITTLE_ENDIAN
+ help
+ Enable support for the Octeon II SOC's on-chip OHCI
+ controller. It is needed for low-speed USB 1.0 device
+ support. All CN6XXX based chips with USB are supported.
+
+endif # USB_OHCI_HCD
config USB_UHCI_HCD
tristate "UHCI HCD (most Intel and VIA) support"
- depends on USB && PCI
+ depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC
---help---
The Universal Host Controller Interface is a standard by Intel for
accessing the USB hardware in the PC (which is also called the USB
@@ -234,14 +567,31 @@ config USB_UHCI_HCD
with Intel PCI chipsets (like intel 430TX, 440FX, 440LX, 440BX,
i810, i820) conform to this standard. Also all VIA PCI chipsets
(like VIA VP2, VP3, MVP3, Apollo Pro, Apollo Pro II or Apollo Pro
- 133). If unsure, say Y.
+ 133) and LEON/GRLIB SoCs with the GRUSBHC controller.
+ If unsure, say Y.
To compile this driver as a module, choose M here: the
module will be called uhci-hcd.
+config USB_UHCI_SUPPORT_NON_PCI_HC
+ bool
+ default y if (SPARC_LEON || USB_UHCI_PLATFORM)
+
+config USB_UHCI_PLATFORM
+ bool
+ default y if ARCH_VT8500
+
+config USB_UHCI_BIG_ENDIAN_MMIO
+ bool
+ default y if SPARC_LEON
+
+config USB_UHCI_BIG_ENDIAN_DESC
+ bool
+ default y if SPARC_LEON
+
config USB_FHCI_HCD
tristate "Freescale QE USB Host Controller support"
- depends on USB && OF_GPIO && QE_GPIO && QUICC_ENGINE
+ depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
select FSL_GTM
select QE_USB
help
@@ -254,12 +604,11 @@ config FHCI_DEBUG
depends on USB_FHCI_HCD && DEBUG_FS
help
Say "y" to see some FHCI debug information and statistics
- throught debugfs.
+ through debugfs.
config USB_U132_HCD
tristate "Elan U132 Adapter Host Controller"
- depends on USB && USB_FTDI_ELAN
- default M
+ depends on USB_FTDI_ELAN
help
The U132 adapter is a USB to CardBus adapter specifically designed
for PC cards that contain an OHCI host controller. Typical PC cards
@@ -286,7 +635,6 @@ config USB_U132_HCD
config USB_SL811_HCD
tristate "SL811HS HCD support"
- depends on USB
help
The SL811HS is a single-port USB controller that supports either
host side or peripheral side roles. Enable this option if your
@@ -296,6 +644,16 @@ config USB_SL811_HCD
To compile this driver as a module, choose M here: the
module will be called sl811-hcd.
+config USB_SL811_HCD_ISO
+ bool "partial ISO support"
+ depends on USB_SL811_HCD
+ help
+ The driver doesn't support iso_frame_desc (yet), but for some simple
+ devices that just queue one ISO frame per URB, then ISO transfers
+ "should" work using the normal urb status fields.
+
+ If unsure, say N.
+
config USB_SL811_CS
tristate "CF/PCMCIA support for SL811HS HCD"
depends on USB_SL811_HCD && PCMCIA
@@ -308,7 +666,6 @@ config USB_SL811_CS
config USB_R8A66597_HCD
tristate "R8A66597 HCD support"
- depends on USB
help
The R8A66597 is a USB 2.0 host and peripheral controller.
@@ -318,17 +675,21 @@ config USB_R8A66597_HCD
To compile this driver as a module, choose M here: the
module will be called r8a66597-hcd.
-config SUPERH_ON_CHIP_R8A66597
- boolean "Enable SuperH on-chip R8A66597 USB"
- depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723)
+config USB_RENESAS_USBHS_HCD
+ tristate "Renesas USBHS HCD support"
+ depends on USB_RENESAS_USBHS
help
- This driver enables support for the on-chip R8A66597 in the
- SH7366 and SH7723 processors.
+ The Renesas USBHS is a USB 2.0 host and peripheral controller.
+
+ Enable this option if your board has this chip, and you want
+ to use it as a host controller. If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called renesas-usbhs.
config USB_WHCI_HCD
- tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- depends on PCI && USB
+ tristate "Wireless USB Host Controller Interface (WHCI) driver"
+ depends on PCI && USB && UWB
select USB_WUSB
select UWB_WHCI
help
@@ -339,9 +700,8 @@ config USB_WHCI_HCD
will be called "whci-hcd".
config USB_HWA_HCD
- tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- depends on USB
+ tristate "Host Wire Adapter (HWA) driver"
+ depends on UWB
select USB_WUSB
select UWB_HWA
help
@@ -352,3 +712,60 @@ config USB_HWA_HCD
To compile this driver a module, choose M here: the module
will be called "hwa-hc".
+
+config USB_IMX21_HCD
+ tristate "i.MX21 HCD support"
+ depends on ARM && ARCH_MXC
+ help
+ This driver enables support for the on-chip USB host in the
+ i.MX21 processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called "imx21-hcd".
+
+
+
+config USB_OCTEON2_COMMON
+ bool
+ default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
+
+config USB_HCD_BCMA
+ tristate "BCMA usb host driver"
+ depends on BCMA
+ select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
+ select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
+ help
+ Enable support for the EHCI and OCHI host controller on an bcma bus.
+ It converts the bcma driver into two platform device drivers
+ for ehci and ohci.
+
+ If unsure, say N.
+
+config USB_HCD_SSB
+ tristate "SSB usb host driver"
+ depends on SSB
+ select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
+ select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
+ help
+ Enable support for the EHCI and OCHI host controller on an bcma bus.
+ It converts the bcma driver into two platform device drivers
+ for ehci and ohci.
+
+ If unsure, say N.
+
+config USB_HCD_TEST_MODE
+ bool "HCD test mode support"
+ ---help---
+ Say 'Y' to enable additional software test modes that may be
+ supported by the host controller drivers.
+
+ One such test mode is the Embedded High-speed Host Electrical Test
+ (EHSET) for EHCI host controller hardware, specifically the "Single
+ Step Set Feature" test. Typically this will be enabled for On-the-Go
+ or embedded hosts that need to undergo USB-IF compliance testing with
+ the aid of special testing hardware. In the future, this may expand
+ to include other tests that require support from a HCD driver.
+
+ This option is of interest only to developers who need to validate
+ their USB hardware designs. It is not needed for normal use. If
+ unsure, say N.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f163571e33d..af89a903d97 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -2,15 +2,26 @@
# Makefile for USB Host Controller Drivers
#
-ifeq ($(CONFIG_USB_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
+# tell define_trace.h where to find the xhci trace header
+CFLAGS_xhci-trace.o := -I$(src)
+
+isp1760-y := isp1760-hcd.o isp1760-if.o
+
+fhci-y := fhci-hcd.o fhci-hub.o fhci-q.o
+fhci-y += fhci-mem.o fhci-tds.o fhci-sched.o
+
+fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o
+
+xhci-hcd-y := xhci.o xhci-mem.o
+xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
+xhci-hcd-y += xhci-trace.o
+xhci-hcd-$(CONFIG_PCI) += xhci-pci.o
-isp1760-objs := isp1760-hcd.o isp1760-if.o
-fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
- fhci-tds.o fhci-sched.o
-ifeq ($(CONFIG_FHCI_DEBUG),y)
-fhci-objs += fhci-dbg.o
+ifneq ($(CONFIG_USB_XHCI_PLATFORM), )
+ xhci-hcd-y += xhci-plat.o
+ifneq ($(CONFIG_USB_XHCI_MVEBU), )
+ xhci-hcd-y += xhci-mvebu.o
+endif
endif
obj-$(CONFIG_USB_WHCI_HCD) += whci/
@@ -18,14 +29,48 @@ obj-$(CONFIG_USB_WHCI_HCD) += whci/
obj-$(CONFIG_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
+obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o
+obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o
+obj-$(CONFIG_USB_EHCI_MXC) += ehci-mxc.o
+obj-$(CONFIG_USB_EHCI_HCD_OMAP) += ehci-omap.o
+obj-$(CONFIG_USB_EHCI_HCD_ORION) += ehci-orion.o
+obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
+obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
+obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
+obj-$(CONFIG_USB_EHCI_MSM) += ehci-msm.o
+obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
+obj-$(CONFIG_USB_W90X900_EHCI) += ehci-w90x900.o
+
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
+obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o
+
obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
+obj-$(CONFIG_USB_OHCI_HCD_PCI) += ohci-pci.o
+obj-$(CONFIG_USB_OHCI_HCD_PLATFORM) += ohci-platform.o
+obj-$(CONFIG_USB_OHCI_EXYNOS) += ohci-exynos.o
+obj-$(CONFIG_USB_OHCI_HCD_OMAP1) += ohci-omap.o
+obj-$(CONFIG_USB_OHCI_HCD_OMAP3) += ohci-omap3.o
+obj-$(CONFIG_USB_OHCI_HCD_SPEAR) += ohci-spear.o
+obj-$(CONFIG_USB_OHCI_HCD_AT91) += ohci-at91.o
+obj-$(CONFIG_USB_OHCI_HCD_S3C2410) += ohci-s3c2410.o
+obj-$(CONFIG_USB_OHCI_HCD_LPC32XX) += ohci-nxp.o
+obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o
+
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
+obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
+obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
+obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
+obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
+obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
+obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
+obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o
+obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
+obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
new file mode 100644
index 00000000000..205f4a33658
--- /dev/null
+++ b/drivers/usb/host/bcma-hcd.c
@@ -0,0 +1,333 @@
+/*
+ * Broadcom specific Advanced Microcontroller Bus
+ * Broadcom USB-core driver (BCMA bus glue)
+ *
+ * Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Based on ssb-ohci driver
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ *
+ * Derived from the OHCI-PCI driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Derived from the USBcore related parts of Broadcom-SB
+ * Copyright 2005-2011 Broadcom Corporation
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+#include <linux/bcma/bcma.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_DESCRIPTION("Common USB driver for BCMA Bus");
+MODULE_LICENSE("GPL");
+
+struct bcma_hcd_device {
+ struct platform_device *ehci_dev;
+ struct platform_device *ohci_dev;
+};
+
+/* Wait for bitmask in a register to get set or cleared.
+ * timeout is in units of ten-microseconds.
+ */
+static int bcma_wait_bits(struct bcma_device *dev, u16 reg, u32 bitmask,
+ int timeout)
+{
+ int i;
+ u32 val;
+
+ for (i = 0; i < timeout; i++) {
+ val = bcma_read32(dev, reg);
+ if ((val & bitmask) == bitmask)
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void bcma_hcd_4716wa(struct bcma_device *dev)
+{
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+ /* Work around for 4716 failures. */
+ if (dev->bus->chipinfo.id == 0x4716) {
+ u32 tmp;
+
+ tmp = bcma_cpu_clock(&dev->bus->drv_mips);
+ if (tmp >= 480000000)
+ tmp = 0x1846b; /* set CDR to 0x11(fast) */
+ else if (tmp == 453000000)
+ tmp = 0x1046b; /* set CDR to 0x10(slow) */
+ else
+ tmp = 0;
+
+ /* Change Shim mdio control reg to fix host not acking at
+ * high frequencies
+ */
+ if (tmp) {
+ bcma_write32(dev, 0x524, 0x1); /* write sel to enable */
+ udelay(500);
+
+ bcma_write32(dev, 0x524, tmp);
+ udelay(500);
+ bcma_write32(dev, 0x524, 0x4ab);
+ udelay(500);
+ bcma_read32(dev, 0x528);
+ bcma_write32(dev, 0x528, 0x80000000);
+ }
+ }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
+}
+
+/* based on arch/mips/brcm-boards/bcm947xx/pcibios.c */
+static void bcma_hcd_init_chip(struct bcma_device *dev)
+{
+ u32 tmp;
+
+ /*
+ * USB 2.0 special considerations:
+ *
+ * 1. Since the core supports both OHCI and EHCI functions, it must
+ * only be reset once.
+ *
+ * 2. In addition to the standard SI reset sequence, the Host Control
+ * Register must be programmed to bring the USB core and various
+ * phy components out of reset.
+ */
+ if (!bcma_core_is_enabled(dev)) {
+ bcma_core_enable(dev, 0);
+ mdelay(10);
+ if (dev->id.rev >= 5) {
+ /* Enable Misc PLL */
+ tmp = bcma_read32(dev, 0x1e0);
+ tmp |= 0x100;
+ bcma_write32(dev, 0x1e0, tmp);
+ if (bcma_wait_bits(dev, 0x1e0, 1 << 24, 100))
+ printk(KERN_EMERG "Failed to enable misc PPL!\n");
+
+ /* Take out of resets */
+ bcma_write32(dev, 0x200, 0x4ff);
+ udelay(25);
+ bcma_write32(dev, 0x200, 0x6ff);
+ udelay(25);
+
+ /* Make sure digital and AFE are locked in USB PHY */
+ bcma_write32(dev, 0x524, 0x6b);
+ udelay(50);
+ tmp = bcma_read32(dev, 0x524);
+ udelay(50);
+ bcma_write32(dev, 0x524, 0xab);
+ udelay(50);
+ tmp = bcma_read32(dev, 0x524);
+ udelay(50);
+ bcma_write32(dev, 0x524, 0x2b);
+ udelay(50);
+ tmp = bcma_read32(dev, 0x524);
+ udelay(50);
+ bcma_write32(dev, 0x524, 0x10ab);
+ udelay(50);
+ tmp = bcma_read32(dev, 0x524);
+
+ if (bcma_wait_bits(dev, 0x528, 0xc000, 10000)) {
+ tmp = bcma_read32(dev, 0x528);
+ printk(KERN_EMERG
+ "USB20H mdio_rddata 0x%08x\n", tmp);
+ }
+ bcma_write32(dev, 0x528, 0x80000000);
+ tmp = bcma_read32(dev, 0x314);
+ udelay(265);
+ bcma_write32(dev, 0x200, 0x7ff);
+ udelay(10);
+
+ /* Take USB and HSIC out of non-driving modes */
+ bcma_write32(dev, 0x510, 0);
+ } else {
+ bcma_write32(dev, 0x200, 0x7ff);
+
+ udelay(1);
+ }
+
+ bcma_hcd_4716wa(dev);
+ }
+}
+
+static const struct usb_ehci_pdata ehci_pdata = {
+};
+
+static const struct usb_ohci_pdata ohci_pdata = {
+};
+
+static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, bool ohci, u32 addr)
+{
+ struct platform_device *hci_dev;
+ struct resource hci_res[2];
+ int ret = -ENOMEM;
+
+ memset(hci_res, 0, sizeof(hci_res));
+
+ hci_res[0].start = addr;
+ hci_res[0].end = hci_res[0].start + 0x1000 - 1;
+ hci_res[0].flags = IORESOURCE_MEM;
+
+ hci_res[1].start = dev->irq;
+ hci_res[1].flags = IORESOURCE_IRQ;
+
+ hci_dev = platform_device_alloc(ohci ? "ohci-platform" :
+ "ehci-platform" , 0);
+ if (!hci_dev)
+ return NULL;
+
+ hci_dev->dev.parent = &dev->dev;
+ hci_dev->dev.dma_mask = &hci_dev->dev.coherent_dma_mask;
+
+ ret = platform_device_add_resources(hci_dev, hci_res,
+ ARRAY_SIZE(hci_res));
+ if (ret)
+ goto err_alloc;
+ if (ohci)
+ ret = platform_device_add_data(hci_dev, &ohci_pdata,
+ sizeof(ohci_pdata));
+ else
+ ret = platform_device_add_data(hci_dev, &ehci_pdata,
+ sizeof(ehci_pdata));
+ if (ret)
+ goto err_alloc;
+ ret = platform_device_add(hci_dev);
+ if (ret)
+ goto err_alloc;
+
+ return hci_dev;
+
+err_alloc:
+ platform_device_put(hci_dev);
+ return ERR_PTR(ret);
+}
+
+static int bcma_hcd_probe(struct bcma_device *dev)
+{
+ int err;
+ u16 chipid_top;
+ u32 ohci_addr;
+ struct bcma_hcd_device *usb_dev;
+ struct bcma_chipinfo *chipinfo;
+
+ chipinfo = &dev->bus->chipinfo;
+ /* USBcores are only connected on embedded devices. */
+ chipid_top = (chipinfo->id & 0xFF00);
+ if (chipid_top != 0x4700 && chipid_top != 0x5300)
+ return -ENODEV;
+
+ /* TODO: Probably need checks here; is the core connected? */
+
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
+ return -EOPNOTSUPP;
+
+ usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
+ if (!usb_dev)
+ return -ENOMEM;
+
+ bcma_hcd_init_chip(dev);
+
+ /* In AI chips EHCI is addrspace 0, OHCI is 1 */
+ ohci_addr = dev->addr1;
+ if ((chipinfo->id == 0x5357 || chipinfo->id == 0x4749)
+ && chipinfo->rev == 0)
+ ohci_addr = 0x18009000;
+
+ usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, true, ohci_addr);
+ if (IS_ERR(usb_dev->ohci_dev)) {
+ err = PTR_ERR(usb_dev->ohci_dev);
+ goto err_free_usb_dev;
+ }
+
+ usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, false, dev->addr);
+ if (IS_ERR(usb_dev->ehci_dev)) {
+ err = PTR_ERR(usb_dev->ehci_dev);
+ goto err_unregister_ohci_dev;
+ }
+
+ bcma_set_drvdata(dev, usb_dev);
+ return 0;
+
+err_unregister_ohci_dev:
+ platform_device_unregister(usb_dev->ohci_dev);
+err_free_usb_dev:
+ kfree(usb_dev);
+ return err;
+}
+
+static void bcma_hcd_remove(struct bcma_device *dev)
+{
+ struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
+ struct platform_device *ohci_dev = usb_dev->ohci_dev;
+ struct platform_device *ehci_dev = usb_dev->ehci_dev;
+
+ if (ohci_dev)
+ platform_device_unregister(ohci_dev);
+ if (ehci_dev)
+ platform_device_unregister(ehci_dev);
+
+ bcma_core_disable(dev, 0);
+}
+
+static void bcma_hcd_shutdown(struct bcma_device *dev)
+{
+ bcma_core_disable(dev, 0);
+}
+
+#ifdef CONFIG_PM
+
+static int bcma_hcd_suspend(struct bcma_device *dev)
+{
+ bcma_core_disable(dev, 0);
+
+ return 0;
+}
+
+static int bcma_hcd_resume(struct bcma_device *dev)
+{
+ bcma_core_enable(dev, 0);
+
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+#define bcma_hcd_suspend NULL
+#define bcma_hcd_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct bcma_device_id bcma_hcd_table[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_USB20_HOST, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, bcma_hcd_table);
+
+static struct bcma_driver bcma_hcd_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = bcma_hcd_table,
+ .probe = bcma_hcd_probe,
+ .remove = bcma_hcd_remove,
+ .shutdown = bcma_hcd_shutdown,
+ .suspend = bcma_hcd_suspend,
+ .resume = bcma_hcd_resume,
+};
+
+static int __init bcma_hcd_init(void)
+{
+ return bcma_driver_register(&bcma_hcd_driver);
+}
+module_init(bcma_hcd_init);
+
+static void __exit bcma_hcd_exit(void)
+{
+ bcma_driver_unregister(&bcma_hcd_driver);
+}
+module_exit(bcma_hcd_exit);
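
One detail of the bcma glue above that is easy to trip over is that bcma_wait_bits() takes its timeout in ten-microsecond units, so the misc-PLL poll with a value of 100 waits at most 1 ms. A small, purely illustrative wrapper with a millisecond interface could look like this; bcma_wait_bits_ms() is hypothetical and assumes the helper defined in the file above:

/* Hypothetical wrapper: express the poll timeout in milliseconds
 * instead of the ten-microsecond units bcma_wait_bits() expects
 * (1 ms == 100 ticks of 10 us).
 */
static int bcma_wait_bits_ms(struct bcma_device *dev, u16 reg,
			     u32 bitmask, unsigned int timeout_ms)
{
	return bcma_wait_bits(dev, reg, bitmask, timeout_ms * 100);
}

/*
 * The misc-PLL lock poll in bcma_hcd_init_chip() would then read:
 *	bcma_wait_bits_ms(dev, 0x1e0, 1 << 24, 1);
 */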
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
new file mode 100644
index 00000000000..ec9f7b75d49
--- /dev/null
+++ b/drivers/usb/host/ehci-atmel.c
@@ -0,0 +1,223 @@
+/*
+ * Driver for EHCI UHP on Atmel chips
+ *
+ * Copyright (C) 2009 Atmel Corporation,
+ * Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * Based on various ehci-*.c drivers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI Atmel driver"
+
+static const char hcd_name[] = "ehci-atmel";
+static struct hc_driver __read_mostly ehci_atmel_hc_driver;
+
+/* interface and function clocks */
+static struct clk *iclk, *fclk, *uclk;
+static int clocked;
+
+/*-------------------------------------------------------------------------*/
+
+static void atmel_start_clock(void)
+{
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ clk_set_rate(uclk, 48000000);
+ clk_prepare_enable(uclk);
+ }
+ clk_prepare_enable(iclk);
+ clk_prepare_enable(fclk);
+ clocked = 1;
+}
+
+static void atmel_stop_clock(void)
+{
+ clk_disable_unprepare(fclk);
+ clk_disable_unprepare(iclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_disable_unprepare(uclk);
+ clocked = 0;
+}
+
+static void atmel_start_ehci(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "start\n");
+ atmel_start_clock();
+}
+
+static void atmel_stop_ehci(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "stop\n");
+ atmel_stop_clock();
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int ehci_atmel_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ const struct hc_driver *driver = &ehci_atmel_hc_driver;
+ struct resource *res;
+ struct ehci_hcd *ehci;
+ int irq;
+ int retval;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_debug("Initializing Atmel-SoC USB Host Controller\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ retval = -ENODEV;
+ goto fail_create_hcd;
+ }
+
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail_create_hcd;
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto fail_request_resource;
+ }
+
+ iclk = devm_clk_get(&pdev->dev, "ehci_clk");
+ if (IS_ERR(iclk)) {
+ dev_err(&pdev->dev, "Error getting interface clock\n");
+ retval = -ENOENT;
+ goto fail_request_resource;
+ }
+ fclk = devm_clk_get(&pdev->dev, "uhpck");
+ if (IS_ERR(fclk)) {
+ dev_err(&pdev->dev, "Error getting function clock\n");
+ retval = -ENOENT;
+ goto fail_request_resource;
+ }
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ uclk = devm_clk_get(&pdev->dev, "usb_clk");
+ if (IS_ERR(uclk)) {
+ dev_err(&pdev->dev, "failed to get uclk\n");
+ retval = PTR_ERR(uclk);
+ goto fail_request_resource;
+ }
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ /* registers start at offset 0x0 */
+ ehci->caps = hcd->regs;
+
+ atmel_start_ehci(pdev);
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval)
+ goto fail_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
+
+ return retval;
+
+fail_add_hcd:
+ atmel_stop_ehci(pdev);
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(&pdev->dev, "init %s fail, %d\n",
+ dev_name(&pdev->dev), retval);
+
+ return retval;
+}
+
+static int ehci_atmel_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ atmel_stop_ehci(pdev);
+ fclk = iclk = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ehci_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g45-ehci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_ehci_dt_ids);
+#endif
+
+static struct platform_driver ehci_atmel_driver = {
+ .probe = ehci_atmel_drv_probe,
+ .remove = ehci_atmel_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "atmel-ehci",
+ .of_match_table = of_match_ptr(atmel_ehci_dt_ids),
+ },
+};
+
+static int __init ehci_atmel_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ehci_init_driver(&ehci_atmel_hc_driver, NULL);
+ return platform_driver_register(&ehci_atmel_driver);
+}
+module_init(ehci_atmel_init);
+
+static void __exit ehci_atmel_cleanup(void)
+{
+ platform_driver_unregister(&ehci_atmel_driver);
+}
+module_exit(ehci_atmel_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:atmel-ehci");
+MODULE_AUTHOR("Nicolas Ferre");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
deleted file mode 100644
index bf69f473910..00000000000
--- a/drivers/usb/host/ehci-au1xxx.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * EHCI HCD (Host Controller Driver) for USB.
- *
- * Bus Glue for AMD Alchemy Au1xxx
- *
- * Based on "ohci-au1xxx.c" by Matt Porter <mporter@kernel.crashing.org>
- *
- * Modified for AMD Alchemy Au1200 EHC
- * by K.Boge <karsten.boge@amd.com>
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/platform_device.h>
-#include <asm/mach-au1x00/au1000.h>
-
-#define USB_HOST_CONFIG (USB_MSR_BASE + USB_MSR_MCFG)
-#define USB_MCFG_PFEN (1<<31)
-#define USB_MCFG_RDCOMB (1<<30)
-#define USB_MCFG_SSDEN (1<<23)
-#define USB_MCFG_PHYPLLEN (1<<19)
-#define USB_MCFG_UCECLKEN (1<<18)
-#define USB_MCFG_EHCCLKEN (1<<17)
-#ifdef CONFIG_DMA_COHERENT
-#define USB_MCFG_UCAM (1<<7)
-#else
-#define USB_MCFG_UCAM (0)
-#endif
-#define USB_MCFG_EBMEN (1<<3)
-#define USB_MCFG_EMEMEN (1<<2)
-
-#define USBH_ENABLE_CE (USB_MCFG_PHYPLLEN | USB_MCFG_EHCCLKEN)
-#define USBH_ENABLE_INIT (USB_MCFG_PFEN | USB_MCFG_RDCOMB | \
- USBH_ENABLE_CE | USB_MCFG_SSDEN | \
- USB_MCFG_UCAM | USB_MCFG_EBMEN | \
- USB_MCFG_EMEMEN)
-
-#define USBH_DISABLE (USB_MCFG_EBMEN | USB_MCFG_EMEMEN)
-
-extern int usb_disabled(void);
-
-static void au1xxx_start_ehc(void)
-{
- /* enable clock to EHCI block and HS PHY PLL*/
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- /* enable EHCI mmio */
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-}
-
-static void au1xxx_stop_ehc(void)
-{
- unsigned long c;
-
- /* Disable mem */
- au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_DISABLE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- /* Disable EHC clock. If the HS PHY is unused disable it too. */
- c = au_readl(USB_HOST_CONFIG) & ~USB_MCFG_EHCCLKEN;
- if (!(c & USB_MCFG_UCECLKEN)) /* UDC disabled? */
- c &= ~USB_MCFG_PHYPLLEN; /* yes: disable HS PHY PLL */
- au_writel(c, USB_HOST_CONFIG);
- au_sync();
-}
-
-static const struct hc_driver ehci_au1xxx_hc_driver = {
- .description = hcd_name,
- .product_desc = "Au1xxx EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
-
- /*
- * basic lifecycle operations
- *
- * FIXME -- ehci_init() doesn't do enough here.
- * See ehci-ppc-soc for a complete implementation.
- */
- .reset = ehci_init,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-};
-
-static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct ehci_hcd *ehci;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
-#if defined(CONFIG_SOC_AU1200) && defined(CONFIG_DMA_COHERENT)
- /* Au1200 AB USB does not support coherent memory */
- if (!(read_c0_prid() & 0xff)) {
- printk(KERN_INFO "%s: this is chip revision AB!\n", pdev->name);
- printk(KERN_INFO "%s: update your board or re-configure"
- " the kernel\n", pdev->name);
- return -ENODEV;
- }
-#endif
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ");
- return -ENOMEM;
- }
- hcd = usb_create_hcd(&ehci_au1xxx_hc_driver, &pdev->dev, "Au1xxx");
- if (!hcd)
- return -ENOMEM;
-
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed");
- ret = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed");
- ret = -ENOMEM;
- goto err2;
- }
-
- au1xxx_start_ehc();
-
- ehci = hcd_to_ehci(hcd);
- ehci->caps = hcd->regs;
- ehci->regs = hcd->regs + HC_LENGTH(readl(&ehci->caps->hc_capbase));
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = readl(&ehci->caps->hcs_params);
-
- ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_DISABLED | IRQF_SHARED);
- if (ret == 0) {
- platform_set_drvdata(pdev, hcd);
- return ret;
- }
-
- au1xxx_stop_ehc();
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err1:
- usb_put_hcd(hcd);
- return ret;
-}
-
-static int ehci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
- au1xxx_stop_ehc();
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ehci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
- pm_message_t message)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- unsigned long flags;
- int rc;
-
- return 0;
- rc = 0;
-
- if (time_before(jiffies, ehci->next_statechange))
- msleep(10);
-
- /* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
- */
- spin_lock_irqsave(&ehci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
- ehci_writel(ehci, 0, &ehci->regs->intr_enable);
- (void)ehci_readl(ehci, &ehci->regs->intr_enable);
-
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW) {
- ehci_halt(ehci);
- ehci_reset(ehci);
- }
-
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- au1xxx_stop_ehc();
-
-bail:
- spin_unlock_irqrestore(&ehci->lock, flags);
-
- // could save FLADJ in case of Vaux power loss
- // ... we'd only use it to handle clock skew
-
- return rc;
-}
-
-
-static int ehci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- au1xxx_start_ehc();
-
- // maybe restore FLADJ
-
- if (time_before(jiffies, ehci->next_statechange))
- msleep(100);
-
- /* Mark hardware accessible again as we are out of D3 state by now */
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- /* If CF is still set, we maintained PCI Vaux power.
- * Just undo the effect of ehci_pci_suspend().
- */
- if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
- int mask = INTR_MASK;
-
- if (!hcd->self.root_hub->do_remote_wakeup)
- mask &= ~STS_PCD;
- ehci_writel(ehci, mask, &ehci->regs->intr_enable);
- ehci_readl(ehci, &ehci->regs->intr_enable);
- return 0;
- }
-
- ehci_dbg(ehci, "lost power, restarting\n");
- usb_root_hub_lost_power(hcd->self.root_hub);
-
- /* Else reset, to cope with power loss or flush-to-storage
- * style "resume" having let BIOS kick in during reboot.
- */
- (void) ehci_halt(ehci);
- (void) ehci_reset(ehci);
-
- /* emptying the schedule aborts any urbs */
- spin_lock_irq(&ehci->lock);
- if (ehci->reclaim)
- end_unlink_async(ehci);
- ehci_work(ehci);
- spin_unlock_irq(&ehci->lock);
-
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
- ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
-
- /* here we "know" root ports should always stay powered */
- ehci_port_power(ehci, 1);
-
- hcd->state = HC_STATE_SUSPENDED;
-
- return 0;
-}
-
-#else
-#define ehci_hcd_au1xxx_drv_suspend NULL
-#define ehci_hcd_au1xxx_drv_resume NULL
-#endif
-
-static struct platform_driver ehci_hcd_au1xxx_driver = {
- .probe = ehci_hcd_au1xxx_drv_probe,
- .remove = ehci_hcd_au1xxx_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .suspend = ehci_hcd_au1xxx_drv_suspend,
- .resume = ehci_hcd_au1xxx_drv_resume,
- .driver = {
- .name = "au1xxx-ehci",
- .owner = THIS_MODULE,
- }
-};
-
-MODULE_ALIAS("platform:au1xxx-ehci");
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 7f4ace73d44..524cbf26d99 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -18,24 +18,7 @@
/* this file is part of ehci-hcd.c */
-#define ehci_dbg(ehci, fmt, args...) \
- dev_dbg (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
-#define ehci_err(ehci, fmt, args...) \
- dev_err (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
-#define ehci_info(ehci, fmt, args...) \
- dev_info (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
-#define ehci_warn(ehci, fmt, args...) \
- dev_warn (ehci_to_hcd(ehci)->self.controller , fmt , ## args )
-
-#ifdef VERBOSE_DEBUG
-# define vdbg dbg
-# define ehci_vdbg ehci_dbg
-#else
-# define vdbg(fmt,args...) do { } while (0)
-# define ehci_vdbg(ehci, fmt, args...) do { } while (0)
-#endif
-
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
/* check the values in the HCSPARAMS register
* (host controller _Structural_ parameters)
@@ -79,7 +62,7 @@ static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {}
#endif
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
/* check the values in the HCCPARAMS register
* (host controller _Capability_ parameters)
@@ -98,13 +81,18 @@ static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
} else {
ehci_dbg (ehci,
- "%s hcc_params %04x thresh %d uframes %s%s%s\n",
+ "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
label,
params,
HCC_ISOC_THRES(params),
HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
HCC_CANPARK(params) ? " park" : "",
- HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
+ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "",
+ HCC_LPM(params) ? " LPM" : "",
+ HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
+ HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
+ HCC_32FRAME_PERIODIC_LIST(params) ?
+ " 32 periodic list" : "");
}
}
#else
@@ -113,7 +101,7 @@ static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
#endif
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
static void __maybe_unused
dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
@@ -134,10 +122,11 @@ dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
static void __maybe_unused
dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
{
+ struct ehci_qh_hw *hw = qh->hw;
+
ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
- qh, qh->hw_next, qh->hw_info1, qh->hw_info2,
- qh->hw_current);
- dbg_qtd ("overlay", ehci, (struct ehci_qtd *) &qh->hw_qtd_next);
+ qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+ dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next);
}
static void __maybe_unused
@@ -190,8 +179,9 @@ static int __maybe_unused
dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
{
return scnprintf (buf, len,
- "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
label, label [0] ? " " : "", status,
+ (status & STS_PPCE_MASK) ? " PPCE" : "",
(status & STS_ASS) ? " Async" : "",
(status & STS_PSS) ? " Periodic" : "",
(status & STS_RECL) ? " Recl" : "",
@@ -209,8 +199,9 @@ static int __maybe_unused
dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
{
return scnprintf (buf, len,
- "%s%sintrenable %02x%s%s%s%s%s%s",
+ "%s%sintrenable %02x%s%s%s%s%s%s%s",
label, label [0] ? " " : "", enable,
+ (enable & STS_PPCE_MASK) ? " PPCE" : "",
(enable & STS_IAA) ? " IAA" : "",
(enable & STS_FATAL) ? " FATAL" : "",
(enable & STS_FLR) ? " FLR" : "",
@@ -227,9 +218,15 @@ static int
dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
{
return scnprintf (buf, len,
- "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
+ "%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s "
+ "period=%s%s %s",
label, label [0] ? " " : "", command,
- (command & CMD_PARK) ? "park" : "(park)",
+ (command & CMD_HIRD) ? " HIRD" : "",
+ (command & CMD_PPCEE) ? " PPCEE" : "",
+ (command & CMD_FSP) ? " FSP" : "",
+ (command & CMD_ASPE) ? " ASPE" : "",
+ (command & CMD_PSPE) ? " PSPE" : "",
+ (command & CMD_PARK) ? " park" : "(park)",
CMD_PARK_CNT (command),
(command >> 16) & 0x3f,
(command & CMD_LRESET) ? " LReset" : "",
@@ -256,11 +253,22 @@ dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
}
return scnprintf (buf, len,
- "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
+ "%s%sport:%d status %06x %d %s%s%s%s%s%s "
+ "sig=%s%s%s%s%s%s%s%s%s%s%s",
label, label [0] ? " " : "", port, status,
+ status>>25,/*device address */
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ?
+ " ACK" : "",
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ?
+ " NYET" : "",
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ?
+ " STALL" : "",
+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ?
+ " ERR" : "",
(status & PORT_POWER) ? " POWER" : "",
(status & PORT_OWNER) ? " OWNER" : "",
sig,
+ (status & PORT_LPM) ? " LPM" : "",
(status & PORT_RESET) ? " RESET" : "",
(status & PORT_SUSPEND) ? " SUSPEND" : "",
(status & PORT_RESUME) ? " RESUME" : "",
@@ -293,7 +301,7 @@ static inline int __maybe_unused
dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }
-#endif /* DEBUG */
+#endif /* CONFIG_DYNAMIC_DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(ehci, label, status) { \
@@ -326,9 +334,10 @@ static inline void remove_debug_files (struct ehci_hcd *bus) { }
/* troubleshooting help: expose state in debugfs */
static int debug_async_open(struct inode *, struct file *);
+static int debug_bandwidth_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
-static int debug_async_open(struct inode *, struct file *);
+
static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
static int debug_close(struct inode *, struct file *);
@@ -337,18 +346,28 @@ static const struct file_operations debug_async_fops = {
.open = debug_async_open,
.read = debug_output,
.release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_bandwidth_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_bandwidth_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
};
static const struct file_operations debug_periodic_fops = {
.owner = THIS_MODULE,
.open = debug_periodic_open,
.read = debug_output,
.release = debug_close,
+ .llseek = default_llseek,
};
static const struct file_operations debug_registers_fops = {
.owner = THIS_MODULE,
.open = debug_registers_open,
.read = debug_output,
.release = debug_close,
+ .llseek = default_llseek,
};
static struct dentry *ehci_debug_root;
@@ -364,11 +383,11 @@ struct debug_buffer {
#define speed_char(info1) ({ char tmp; \
switch (info1 & (3 << 12)) { \
- case 0 << 12: tmp = 'f'; break; \
- case 1 << 12: tmp = 'l'; break; \
- case 2 << 12: tmp = 'h'; break; \
+ case QH_FULL_SPEED: tmp = 'f'; break; \
+ case QH_LOW_SPEED: tmp = 'l'; break; \
+ case QH_HIGH_SPEED: tmp = 'h'; break; \
default: tmp = '?'; break; \
- }; tmp; })
+ } tmp; })
static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
{
@@ -400,31 +419,32 @@ static void qh_lines (
char *next = *nextp;
char mark;
__le32 list_end = EHCI_LIST_END(ehci);
+ struct ehci_qh_hw *hw = qh->hw;
- if (qh->hw_qtd_next == list_end) /* NEC does this */
+ if (hw->hw_qtd_next == list_end) /* NEC does this */
mark = '@';
else
- mark = token_mark(ehci, qh->hw_token);
+ mark = token_mark(ehci, hw->hw_token);
if (mark == '/') { /* qh_alt_next controls qh advance? */
- if ((qh->hw_alt_next & QTD_MASK(ehci))
- == ehci->async->hw_alt_next)
+ if ((hw->hw_alt_next & QTD_MASK(ehci))
+ == ehci->async->hw->hw_alt_next)
mark = '#'; /* blocked */
- else if (qh->hw_alt_next == list_end)
+ else if (hw->hw_alt_next == list_end)
mark = '.'; /* use hw_qtd_next */
/* else alt_next points to some other qtd */
}
- scratch = hc32_to_cpup(ehci, &qh->hw_info1);
- hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &qh->hw_current) : 0;
+ scratch = hc32_to_cpup(ehci, &hw->hw_info1);
+ hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0;
temp = scnprintf (next, size,
"qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f,
- scratch, hc32_to_cpup(ehci, &qh->hw_info2),
- hc32_to_cpup(ehci, &qh->hw_token), mark,
- (cpu_to_hc32(ehci, QTD_TOGGLE) & qh->hw_token)
+ scratch, hc32_to_cpup(ehci, &hw->hw_info2),
+ hc32_to_cpup(ehci, &hw->hw_token), mark,
+ (cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token)
? "data1" : "data0",
- (hc32_to_cpup(ehci, &qh->hw_alt_next) >> 1) & 0x0f);
+ (hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f);
size -= temp;
next += temp;
@@ -435,10 +455,10 @@ static void qh_lines (
mark = ' ';
if (hw_curr == td->qtd_dma)
mark = '*';
- else if (qh->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
+ else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
mark = '+';
else if (QTD_LENGTH (scratch)) {
- if (td->hw_alt_next == ehci->async->hw_alt_next)
+ if (td->hw_alt_next == ehci->async->hw->hw_alt_next)
mark = '#';
else if (td->hw_alt_next != list_end)
mark = '/';
@@ -497,19 +517,105 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
spin_lock_irqsave (&ehci->lock, flags);
for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
qh_lines (ehci, qh, &next, &size);
- if (ehci->reclaim && size > 0) {
- temp = scnprintf (next, size, "\nreclaim =\n");
+ if (!list_empty(&ehci->async_unlink) && size > 0) {
+ temp = scnprintf(next, size, "\nunlink =\n");
size -= temp;
next += temp;
- for (qh = ehci->reclaim; size > 0 && qh; qh = qh->reclaim)
- qh_lines (ehci, qh, &next, &size);
+ list_for_each_entry(qh, &ehci->async_unlink, unlink_node) {
+ if (size <= 0)
+ break;
+ qh_lines(ehci, qh, &next, &size);
+ }
}
spin_unlock_irqrestore (&ehci->lock, flags);
return strlen(buf->output_buf);
}
+static ssize_t fill_bandwidth_buffer(struct debug_buffer *buf)
+{
+ struct ehci_hcd *ehci;
+ struct ehci_tt *tt;
+ struct ehci_per_sched *ps;
+ unsigned temp, size;
+ char *next;
+ unsigned i;
+ u8 *bw;
+ u16 *bf;
+ u8 budget[EHCI_BANDWIDTH_SIZE];
+
+ ehci = hcd_to_ehci(bus_to_hcd(buf->bus));
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ *next = 0;
+
+ spin_lock_irq(&ehci->lock);
+
+ /* Dump the HS bandwidth table */
+ temp = scnprintf(next, size,
+ "HS bandwidth allocation (us per microframe)\n");
+ size -= temp;
+ next += temp;
+ for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
+ bw = &ehci->bandwidth[i];
+ temp = scnprintf(next, size,
+ "%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
+ i, bw[0], bw[1], bw[2], bw[3],
+ bw[4], bw[5], bw[6], bw[7]);
+ size -= temp;
+ next += temp;
+ }
+
+ /* Dump all the FS/LS tables */
+ list_for_each_entry(tt, &ehci->tt_list, tt_list) {
+ temp = scnprintf(next, size,
+ "\nTT %s port %d FS/LS bandwidth allocation (us per frame)\n",
+ dev_name(&tt->usb_tt->hub->dev),
+ tt->tt_port + !!tt->usb_tt->multi);
+ size -= temp;
+ next += temp;
+
+ bf = tt->bandwidth;
+ temp = scnprintf(next, size,
+ " %5u%5u%5u%5u%5u%5u%5u%5u\n",
+ bf[0], bf[1], bf[2], bf[3],
+ bf[4], bf[5], bf[6], bf[7]);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size,
+ "FS/LS budget (us per microframe)\n");
+ size -= temp;
+ next += temp;
+ compute_tt_budget(budget, tt);
+ for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
+ bw = &budget[i];
+ temp = scnprintf(next, size,
+ "%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
+ i, bw[0], bw[1], bw[2], bw[3],
+ bw[4], bw[5], bw[6], bw[7]);
+ size -= temp;
+ next += temp;
+ }
+ list_for_each_entry(ps, &tt->ps_list, ps_list) {
+ temp = scnprintf(next, size,
+ "%s ep %02x: %4u @ %2u.%u+%u mask %04x\n",
+ dev_name(&ps->udev->dev),
+ ps->ep->desc.bEndpointAddress,
+ ps->tt_usecs,
+ ps->bw_phase, ps->phase_uf,
+ ps->bw_period, ps->cs_mask);
+ size -= temp;
+ next += temp;
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+
+ return next - buf->output_buf;
+}
+
#define DBG_SCHED_LIMIT 64
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
@@ -550,12 +656,15 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
next += temp;
do {
+ struct ehci_qh_hw *hw;
+
switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
+ hw = p.qh->hw;
temp = scnprintf (next, size, " qh%d-%04x/%p",
- p.qh->period,
+ p.qh->ps.period,
hc32_to_cpup(ehci,
- &p.qh->hw_info2)
+ &hw->hw_info2)
/* uframe masks */
& (QH_CMASK | QH_SMASK),
p.qh);
@@ -576,7 +685,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
/* show more info the first time around */
if (temp == seen_count) {
u32 scratch = hc32_to_cpup(ehci,
- &p.qh->hw_info1);
+ &hw->hw_info1);
struct ehci_qtd *qtd;
char *type = "";
@@ -600,7 +709,8 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
speed_char (scratch),
scratch & 0x007f,
(scratch >> 8) & 0x000f, type,
- p.qh->usecs, p.qh->c_usecs,
+ p.qh->ps.usecs,
+ p.qh->ps.c_usecs,
temp,
0x7ff & (scratch >> 16));
@@ -608,10 +718,8 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
seen [seen_count++].qh = p.qh;
} else
temp = 0;
- if (p.qh) {
- tag = Q_NEXT_TYPE(ehci, p.qh->hw_next);
- p = p.qh->qh_next;
- }
+ tag = Q_NEXT_TYPE(ehci, hw->hw_next);
+ p = p.qh->qh_next;
break;
case Q_TYPE_FSTN:
temp = scnprintf (next, size,
@@ -629,7 +737,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
case Q_TYPE_SITD:
temp = scnprintf (next, size,
" sitd%d-%04x/%p",
- p.sitd->stream->interval,
+ p.sitd->stream->ps.period,
hc32_to_cpup(ehci, &p.sitd->hw_uframe)
& 0x0000ffff,
p.sitd);
@@ -652,6 +760,21 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
}
#undef DBG_SCHED_LIMIT
+static const char *rh_state_string(struct ehci_hcd *ehci)
+{
+ switch (ehci->rh_state) {
+ case EHCI_RH_HALTED:
+ return "halted";
+ case EHCI_RH_SUSPENDED:
+ return "suspended";
+ case EHCI_RH_RUNNING:
+ return "running";
+ case EHCI_RH_STOPPING:
+ return "stopping";
+ }
+ return "?";
+}
+
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
struct usb_hcd *hcd;
@@ -669,7 +792,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
spin_lock_irqsave (&ehci->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
size = scnprintf (next, size,
"bus %s, device %s\n"
"%s\n"
@@ -681,21 +804,21 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
}
/* Capability Registers */
- i = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ i = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
temp = scnprintf (next, size,
"bus %s, device %s\n"
"%s\n"
- "EHCI %x.%02x, hcd state %d\n",
+ "EHCI %x.%02x, rh state %s\n",
hcd->self.controller->bus->name,
dev_name(hcd->self.controller),
hcd->product_desc,
- i >> 8, i & 0x0ff, hcd->state);
+ i >> 8, i & 0x0ff, rh_state_string(ehci));
size -= temp;
next += temp;
#ifdef CONFIG_PCI
/* EHCI 0.96 and later may have "extended capabilities" */
- if (hcd->self.controller->bus == &pci_bus_type) {
+ if (dev_is_pci(hcd->self.controller)) {
struct pci_dev *pdev;
u32 offset, cap, cap2;
unsigned count = 256/4;
@@ -763,7 +886,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
next += temp;
temp = scnprintf (next, size, "uframe %04x\n",
- ehci_readl(ehci, &ehci->regs->frame_index));
+ ehci_read_frame_index(ehci));
size -= temp;
next += temp;
@@ -784,16 +907,18 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
}
}
- if (ehci->reclaim) {
- temp = scnprintf(next, size, "reclaim qh %p\n", ehci->reclaim);
+ if (!list_empty(&ehci->async_unlink)) {
+ temp = scnprintf(next, size, "async unlink qh %p\n",
+ list_first_entry(&ehci->async_unlink,
+ struct ehci_qh, unlink_node));
size -= temp;
next += temp;
}
#ifdef EHCI_STATS
temp = scnprintf (next, size,
- "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
- ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
+ "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ ehci->stats.normal, ehci->stats.error, ehci->stats.iaa,
ehci->stats.lost_iaa);
size -= temp;
next += temp;
@@ -832,7 +957,7 @@ static int fill_buffer(struct debug_buffer *buf)
int ret = 0;
if (!buf->output_buf)
- buf->output_buf = (char *)vmalloc(buf->alloc_size);
+ buf->output_buf = vmalloc(buf->alloc_size);
if (!buf->output_buf) {
ret = -ENOMEM;
@@ -879,13 +1004,13 @@ static int debug_close(struct inode *inode, struct file *file)
struct debug_buffer *buf = file->private_data;
if (buf) {
- if (buf->output_buf)
- vfree(buf->output_buf);
+ vfree(buf->output_buf);
kfree(buf);
}
return 0;
}
+
static int debug_async_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
@@ -893,6 +1018,14 @@ static int debug_async_open(struct inode *inode, struct file *file)
return file->private_data ? 0 : -ENOMEM;
}
+static int debug_bandwidth_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_bandwidth_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
static int debug_periodic_open(struct inode *inode, struct file *file)
{
struct debug_buffer *buf;
@@ -919,45 +1052,33 @@ static inline void create_debug_files (struct ehci_hcd *ehci)
ehci->debug_dir = debugfs_create_dir(bus->bus_name, ehci_debug_root);
if (!ehci->debug_dir)
- goto dir_error;
-
- ehci->debug_async = debugfs_create_file("async", S_IRUGO,
- ehci->debug_dir, bus,
- &debug_async_fops);
- if (!ehci->debug_async)
- goto async_error;
-
- ehci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
- ehci->debug_dir, bus,
- &debug_periodic_fops);
- if (!ehci->debug_periodic)
- goto periodic_error;
-
- ehci->debug_registers = debugfs_create_file("registers", S_IRUGO,
- ehci->debug_dir, bus,
- &debug_registers_fops);
- if (!ehci->debug_registers)
- goto registers_error;
+ return;
+
+ if (!debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus,
+ &debug_async_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("bandwidth", S_IRUGO, ehci->debug_dir, bus,
+ &debug_bandwidth_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
+ &debug_periodic_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("registers", S_IRUGO, ehci->debug_dir, bus,
+ &debug_registers_fops))
+ goto file_error;
+
return;
-registers_error:
- debugfs_remove(ehci->debug_periodic);
-periodic_error:
- debugfs_remove(ehci->debug_async);
-async_error:
- debugfs_remove(ehci->debug_dir);
-dir_error:
- ehci->debug_periodic = NULL;
- ehci->debug_async = NULL;
- ehci->debug_dir = NULL;
+file_error:
+ debugfs_remove_recursive(ehci->debug_dir);
}
static inline void remove_debug_files (struct ehci_hcd *ehci)
{
- debugfs_remove(ehci->debug_registers);
- debugfs_remove(ehci->debug_periodic);
- debugfs_remove(ehci->debug_async);
- debugfs_remove(ehci->debug_dir);
+ debugfs_remove_recursive(ehci->debug_dir);
}
#endif /* STUB_DEBUG_FILES */
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
new file mode 100644
index 00000000000..d1c76216350
--- /dev/null
+++ b/drivers/usb/host/ehci-exynos.c
@@ -0,0 +1,390 @@
+/*
+ * SAMSUNG EXYNOS USB HOST EHCI Controller
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/samsung_usb_phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI EXYNOS driver"
+
+#define EHCI_INSNREG00(base) (base + 0x90)
+#define EHCI_INSNREG00_ENA_INCR16 (0x1 << 25)
+#define EHCI_INSNREG00_ENA_INCR8 (0x1 << 24)
+#define EHCI_INSNREG00_ENA_INCR4 (0x1 << 23)
+#define EHCI_INSNREG00_ENA_INCRX_ALIGN (0x1 << 22)
+#define EHCI_INSNREG00_ENABLE_DMA_BURST \
+ (EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 | \
+ EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN)
+
+static const char hcd_name[] = "ehci-exynos";
+static struct hc_driver __read_mostly exynos_ehci_hc_driver;
+
+#define PHY_NUMBER 3
+
+struct exynos_ehci_hcd {
+ struct clk *clk;
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+ struct phy *phy_g[PHY_NUMBER];
+};
+
+#define to_exynos_ehci(hcd) (struct exynos_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
+
+static int exynos_ehci_get_phy(struct device *dev,
+ struct exynos_ehci_hcd *exynos_ehci)
+{
+ struct device_node *child;
+ struct phy *phy;
+ int phy_number;
+ int ret = 0;
+
+ exynos_ehci->phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(exynos_ehci->phy)) {
+ ret = PTR_ERR(exynos_ehci->phy);
+ if (ret != -ENXIO && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ } else {
+ exynos_ehci->otg = exynos_ehci->phy->otg;
+ }
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &phy_number);
+ if (ret) {
+ dev_err(dev, "Failed to parse device tree\n");
+ of_node_put(child);
+ return ret;
+ }
+
+ if (phy_number >= PHY_NUMBER) {
+ dev_err(dev, "Invalid number of PHYs\n");
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ phy = devm_of_phy_get(dev, child, 0);
+ of_node_put(child);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ if (ret != -ENOSYS && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ }
+ exynos_ehci->phy_g[phy_number] = phy;
+ }
+
+ return ret;
+}
+
+static int exynos_ehci_phy_enable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+ int i;
+ int ret = 0;
+
+ if (!IS_ERR(exynos_ehci->phy))
+ return usb_phy_init(exynos_ehci->phy);
+
+ for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ehci->phy_g[i]))
+ ret = phy_power_on(exynos_ehci->phy_g[i]);
+ if (ret)
+ for (i--; i >= 0; i--)
+ if (!IS_ERR(exynos_ehci->phy_g[i]))
+ phy_power_off(exynos_ehci->phy_g[i]);
+
+ return ret;
+}
+
+static void exynos_ehci_phy_disable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+ int i;
+
+ if (!IS_ERR(exynos_ehci->phy)) {
+ usb_phy_shutdown(exynos_ehci->phy);
+ return;
+ }
+
+ for (i = 0; i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ehci->phy_g[i]))
+ phy_power_off(exynos_ehci->phy_g[i]);
+}
+
+static void exynos_setup_vbus_gpio(struct device *dev)
+{
+ int err;
+ int gpio;
+
+ if (!dev->of_node)
+ return;
+
+ gpio = of_get_named_gpio(dev->of_node, "samsung,vbus-gpio", 0);
+ if (!gpio_is_valid(gpio))
+ return;
+
+ err = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_HIGH,
+ "ehci_vbus_gpio");
+ if (err)
+ dev_err(dev, "can't request ehci vbus gpio %d", gpio);
+}
+
+static int exynos_ehci_probe(struct platform_device *pdev)
+{
+ struct exynos_ehci_hcd *exynos_ehci;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct resource *res;
+ int irq;
+ int err;
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we move to full device tree support this will vanish off.
+ */
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ exynos_setup_vbus_gpio(&pdev->dev);
+
+ hcd = usb_create_hcd(&exynos_ehci_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ return -ENOMEM;
+ }
+ exynos_ehci = to_exynos_ehci(hcd);
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "samsung,exynos5440-ehci"))
+ goto skip_phy;
+
+ err = exynos_ehci_get_phy(&pdev->dev, exynos_ehci);
+ if (err)
+ goto fail_clk;
+
+skip_phy:
+
+ exynos_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
+
+ if (IS_ERR(exynos_ehci->clk)) {
+ dev_err(&pdev->dev, "Failed to get usbhost clock\n");
+ err = PTR_ERR(exynos_ehci->clk);
+ goto fail_clk;
+ }
+
+ err = clk_prepare_enable(exynos_ehci->clk);
+ if (err)
+ goto fail_clk;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto fail_io;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto fail_io;
+ }
+
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
+
+ err = exynos_ehci_phy_enable(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable USB phy\n");
+ goto fail_io;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs;
+
+ /* DMA burst Enable */
+ writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ platform_set_drvdata(pdev, hcd);
+
+ return 0;
+
+fail_add_hcd:
+ exynos_ehci_phy_disable(&pdev->dev);
+fail_io:
+ clk_disable_unprepare(exynos_ehci->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+ return err;
+}
+
+static int exynos_ehci_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+
+ usb_remove_hcd(hcd);
+
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
+
+ exynos_ehci_phy_disable(&pdev->dev);
+
+ clk_disable_unprepare(exynos_ehci->clk);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int exynos_ehci_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+
+ bool do_wakeup = device_may_wakeup(dev);
+ int rc;
+
+ rc = ehci_suspend(hcd, do_wakeup);
+ if (rc)
+ return rc;
+
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
+
+ exynos_ehci_phy_disable(dev);
+
+ clk_disable_unprepare(exynos_ehci->clk);
+
+ return rc;
+}
+
+static int exynos_ehci_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+ int ret;
+
+ clk_prepare_enable(exynos_ehci->clk);
+
+ if (exynos_ehci->otg)
+ exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
+
+ ret = exynos_ehci_phy_enable(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable USB phy\n");
+ clk_disable_unprepare(exynos_ehci->clk);
+ return ret;
+ }
+
+ /* DMA burst Enable */
+ writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
+
+ ehci_resume(hcd, false);
+ return 0;
+}
+#else
+#define exynos_ehci_suspend NULL
+#define exynos_ehci_resume NULL
+#endif
+
+static const struct dev_pm_ops exynos_ehci_pm_ops = {
+ .suspend = exynos_ehci_suspend,
+ .resume = exynos_ehci_resume,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_ehci_match[] = {
+ { .compatible = "samsung,exynos4210-ehci" },
+ { .compatible = "samsung,exynos5440-ehci" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_ehci_match);
+#endif
+
+static struct platform_driver exynos_ehci_driver = {
+ .probe = exynos_ehci_probe,
+ .remove = exynos_ehci_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "exynos-ehci",
+ .owner = THIS_MODULE,
+ .pm = &exynos_ehci_pm_ops,
+ .of_match_table = of_match_ptr(exynos_ehci_match),
+ }
+};
+static const struct ehci_driver_overrides exynos_overrides __initdata = {
+ .extra_priv_size = sizeof(struct exynos_ehci_hcd),
+};
+
+static int __init ehci_exynos_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ehci_init_driver(&exynos_ehci_hc_driver, &exynos_overrides);
+ return platform_driver_register(&exynos_ehci_driver);
+}
+module_init(ehci_exynos_init);
+
+static void __exit ehci_exynos_cleanup(void)
+{
+ platform_driver_unregister(&exynos_ehci_driver);
+}
+module_exit(ehci_exynos_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:exynos-ehci");
+MODULE_AUTHOR("Jingoo Han");
+MODULE_AUTHOR("Joonyoung Shim");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 01c3da34f67..cf2734b532a 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2005 MontaVista Software
+ * Copyright 2005-2009 MontaVista Software, Inc.
+ * Copyright 2008,2012 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,17 +18,21 @@
*
* Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
* by Hunter Wu.
+ * Power Management support by Dave Liu <daveliu@freescale.com>,
+ * Jerry Huang <Chang-Ming.Huang@freescale.com> and
+ * Anton Vorontsov <avorontsov@ru.mvista.com>.
*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include "ehci-fsl.h"
-/* FIXME: Power Management is un-ported so temporarily disable it */
-#undef CONFIG_PM
-
-
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
@@ -40,20 +45,19 @@
* Allocates basic resources for this USB host controller.
*
*/
-int usb_hcd_fsl_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
+static int usb_hcd_fsl_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata;
struct usb_hcd *hcd;
struct resource *res;
int irq;
int retval;
- unsigned int temp;
pr_debug("initializing FSL-SOC USB Controller\n");
/* Need platform data for setup */
- pdata = (struct fsl_usb2_platform_data *)pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev,
"No platform data for %s.\n", dev_name(&pdev->dev));
@@ -97,42 +101,67 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver,
goto err2;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description)) {
- dev_dbg(&pdev->dev, "controller already in use\n");
- retval = -EBUSY;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
goto err2;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- retval = -EFAULT;
- goto err3;
+ pdata->regs = hcd->regs;
+
+ if (pdata->power_budget)
+ hcd->power_budget = pdata->power_budget;
+
+ /*
+ * do platform specific init: check the clock, grab/config pins, etc.
+ */
+ if (pdata->init && pdata->init(pdev)) {
+ retval = -ENODEV;
+ goto err2;
}
- /* Enable USB controller */
- temp = in_be32(hcd->regs + 0x500);
- out_be32(hcd->regs + 0x500, temp | 0x4);
+ /* Enable USB controller, 83xx or 8536 */
+ if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
+ setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
- /* Set to Host mode */
- temp = in_le32(hcd->regs + 0x1a8);
- out_le32(hcd->regs + 0x1a8, temp | 0x3);
+ /* Don't need to set host mode here. It will be done by tdi_reset() */
- retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
- goto err4;
+ goto err2;
+ device_wakeup_enable(hcd->self.controller);
+
+#ifdef CONFIG_USB_OTG
+ if (pdata->operating_mode == FSL_USB2_DR_OTG) {
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ hcd->phy = usb_get_phy(USB_PHY_TYPE_USB2);
+ dev_dbg(&pdev->dev, "hcd=0x%p ehci=0x%p, phy=0x%p\n",
+ hcd, ehci, hcd->phy);
+
+ if (!IS_ERR_OR_NULL(hcd->phy)) {
+ retval = otg_set_host(hcd->phy->otg,
+ &ehci_to_hcd(ehci)->self);
+ if (retval) {
+ usb_put_phy(hcd->phy);
+ goto err2;
+ }
+ } else {
+ dev_err(&pdev->dev, "can't find phy\n");
+ retval = -ENODEV;
+ goto err2;
+ }
+ }
+#endif
return retval;
- err4:
- iounmap(hcd->regs);
- err3:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err2:
usb_put_hcd(hcd);
err1:
dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
+ if (pdata->exit)
+ pdata->exit(pdev);
return retval;
}
@@ -147,21 +176,53 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver,
* Reverses the effect of usb_hcd_fsl_probe().
*
*/
-void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
+ struct platform_device *pdev)
{
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ if (!IS_ERR_OR_NULL(hcd->phy)) {
+ otg_set_host(hcd->phy->otg, NULL);
+ usb_put_phy(hcd->phy);
+ }
+
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+ /*
+ * do platform specific un-initialization:
+ * release iomux pins, disable clock, etc.
+ */
+ if (pdata->exit)
+ pdata->exit(pdev);
usb_put_hcd(hcd);
}
-static void mpc83xx_setup_phy(struct ehci_hcd *ehci,
- enum fsl_usb2_phy_modes phy_mode,
- unsigned int port_offset)
+static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
+ enum fsl_usb2_phy_modes phy_mode,
+ unsigned int port_offset)
{
- u32 portsc = 0;
+ u32 portsc;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ void __iomem *non_ehci = hcd->regs;
+ struct device *dev = hcd->self.controller;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
+
+ if (pdata->controller_ver < 0) {
+ dev_warn(hcd->self.controller, "Could not get controller version\n");
+ return -ENODEV;
+ }
+
+ portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
+ portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
+
switch (phy_mode) {
case FSL_USB2_PHY_ULPI:
+ if (pdata->have_sysif_regs && pdata->controller_ver) {
+ /* controller version 1.6 or above */
+ clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
+ setbits32(non_ehci + FSL_SOC_USB_CTRL,
+ ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
+ }
portsc |= PORT_PTS_ULPI;
break;
case FSL_USB2_PHY_SERIAL:
@@ -171,45 +232,67 @@ static void mpc83xx_setup_phy(struct ehci_hcd *ehci,
portsc |= PORT_PTS_PTW;
/* fall through */
case FSL_USB2_PHY_UTMI:
+ if (pdata->have_sysif_regs && pdata->controller_ver) {
+ /* controller version 1.6 or above */
+ setbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
+ mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI PHY CLK to
+ become stable - 10ms*/
+ }
+ /* enable UTMI PHY */
+ if (pdata->have_sysif_regs)
+ setbits32(non_ehci + FSL_SOC_USB_CTRL,
+ CTRL_UTMI_PHY_EN);
portsc |= PORT_PTS_UTMI;
break;
case FSL_USB2_PHY_NONE:
break;
}
+
+ if (pdata->have_sysif_regs &&
+ pdata->controller_ver > FSL_USB_VER_1_6 &&
+ (phy_mode == FSL_USB2_PHY_ULPI)) {
+ /* check PHY_CLK_VALID to get phy clk valid */
+ if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+ PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
+ in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
+ dev_warn(hcd->self.controller, "USB PHY clock invalid\n");
+ return -EINVAL;
+ }
+ }
+
ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
+
+ if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
+ setbits32(non_ehci + FSL_SOC_USB_CTRL, USB_CTRL_USB_EN);
+
+ return 0;
}
-static void mpc83xx_usb_setup(struct usb_hcd *hcd)
+static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct usb_hcd *hcd = ehci_to_hcd(ehci);
struct fsl_usb2_platform_data *pdata;
void __iomem *non_ehci = hcd->regs;
- u32 temp;
- pdata =
- (struct fsl_usb2_platform_data *)hcd->self.controller->
- platform_data;
- /* Enable PHY interface in the control reg. */
- temp = in_be32(non_ehci + FSL_SOC_USB_CTRL);
- out_be32(non_ehci + FSL_SOC_USB_CTRL, temp | 0x00000004);
- out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0000001b);
+ pdata = dev_get_platdata(hcd->self.controller);
-#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
- /*
- * Turn on cache snooping hardware, since some PowerPC platforms
- * wholly rely on hardware to deal with cache coherent
- */
+ if (pdata->have_sysif_regs) {
+ /*
+ * Turn on cache snooping hardware, since some PowerPC platforms
+ * wholly rely on hardware to deal with cache coherent
+ */
- /* Setup Snooping for all the 4GB space */
- /* SNOOP1 starts from 0x0, size 2G */
- out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB);
- /* SNOOP2 starts from 0x80000000, size 2G */
- out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
-#endif
+ /* Setup Snooping for all the 4GB space */
+ /* SNOOP1 starts from 0x0, size 2G */
+ out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB);
+ /* SNOOP2 starts from 0x80000000, size 2G */
+ out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
+ }
if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
(pdata->operating_mode == FSL_USB2_DR_OTG))
- mpc83xx_setup_phy(ehci, pdata->phy_mode, 0);
+ if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
+ return -EINVAL;
if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
unsigned int chip, rev, svr;
@@ -223,28 +306,33 @@ static void mpc83xx_usb_setup(struct usb_hcd *hcd)
ehci->has_fsl_port_bug = 1;
if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
- mpc83xx_setup_phy(ehci, pdata->phy_mode, 0);
+ if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
+ return -EINVAL;
+
if (pdata->port_enables & FSL_USB2_PORT1_ENABLED)
- mpc83xx_setup_phy(ehci, pdata->phy_mode, 1);
+ if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 1))
+ return -EINVAL;
}
- /* put controller in host mode. */
- ehci_writel(ehci, 0x00000003, non_ehci + FSL_SOC_USB_USBMODE);
-#ifdef CONFIG_PPC_85xx
- out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x00000008);
- out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000080);
+ if (pdata->have_sysif_regs) {
+#ifdef CONFIG_FSL_SOC_BOOKE
+ out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x00000008);
+ out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000080);
#else
- out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c);
- out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040);
+ out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c);
+ out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040);
#endif
- out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
+ out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
+ }
+
+ return 0;
}
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_fsl_reinit(struct ehci_hcd *ehci)
{
- mpc83xx_usb_setup(ehci_to_hcd(ehci));
- ehci_port_power(ehci, 0);
+ if (ehci_fsl_usb_setup(ehci))
+ return -EINVAL;
return 0;
}
@@ -254,46 +342,317 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
+ struct fsl_usb2_platform_data *pdata;
+ struct device *dev;
+
+ dev = hcd->self.controller;
+ pdata = dev_get_platdata(hcd->self.controller);
+ ehci->big_endian_desc = pdata->big_endian_desc;
+ ehci->big_endian_mmio = pdata->big_endian_mmio;
/* EHCI registers start at offset 0x100 */
ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100 +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
- dbg_hcs_params(ehci, "reset");
- dbg_hcc_params(ehci, "reset");
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+#ifdef CONFIG_PPC_83xx
+ /*
+ * Deal with MPC834X that need port power to be cycled after the power
+ * fault condition is removed. Otherwise the state machine does not
+ * reflect PORTSC[CSC] correctly.
+ */
+ ehci->need_oc_pp_cycle = 1;
+#endif
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
+ hcd->has_tt = 1;
- /* data structure init */
- retval = ehci_init(hcd);
+ retval = ehci_setup(hcd);
if (retval)
return retval;
- hcd->has_tt = 1;
+ if (of_device_is_compatible(dev->parent->of_node,
+ "fsl,mpc5121-usb2-dr")) {
+ /*
+ * set SBUSCFG:AHBBRST so that control msgs don't
+ * fail when doing heavy PATA writes.
+ */
+ ehci_writel(ehci, SBUSCFG_INCR8,
+ hcd->regs + FSL_SOC_USB_SBUSCFG);
+ }
+
+ retval = ehci_fsl_reinit(ehci);
+ return retval;
+}
+
+struct ehci_fsl {
+ struct ehci_hcd ehci;
+
+#ifdef CONFIG_PM
+ /* Saved USB PHY settings, need to restore after deep sleep. */
+ u32 usb_ctrl;
+#endif
+};
+
+#ifdef CONFIG_PM
+
+#ifdef CONFIG_PPC_MPC512x
+static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
+ u32 tmp;
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+ u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE);
+ mode &= USBMODE_CM_MASK;
+ tmp = ehci_readl(ehci, hcd->regs + 0x140); /* usbcmd */
+
+ dev_dbg(dev, "suspend=%d already_suspended=%d "
+ "mode=%d usbcmd %08x\n", pdata->suspended,
+ pdata->already_suspended, mode, tmp);
+#endif
+
+ /*
+ * If the controller is already suspended, then this must be a
+ * PM suspend. Remember this fact, so that we will leave the
+ * controller suspended at PM resume time.
+ */
+ if (pdata->suspended) {
+ dev_dbg(dev, "already suspended, leaving early\n");
+ pdata->already_suspended = 1;
+ return 0;
+ }
+
+ dev_dbg(dev, "suspending...\n");
+
+ ehci->rh_state = EHCI_RH_SUSPENDED;
+ dev->power.power_state = PMSG_SUSPEND;
+
+ /* ignore non-host interrupts */
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ /* stop the controller */
+ tmp = ehci_readl(ehci, &ehci->regs->command);
+ tmp &= ~CMD_RUN;
+ ehci_writel(ehci, tmp, &ehci->regs->command);
+
+ /* save EHCI registers */
+ pdata->pm_command = ehci_readl(ehci, &ehci->regs->command);
+ pdata->pm_command &= ~CMD_RUN;
+ pdata->pm_status = ehci_readl(ehci, &ehci->regs->status);
+ pdata->pm_intr_enable = ehci_readl(ehci, &ehci->regs->intr_enable);
+ pdata->pm_frame_index = ehci_readl(ehci, &ehci->regs->frame_index);
+ pdata->pm_segment = ehci_readl(ehci, &ehci->regs->segment);
+ pdata->pm_frame_list = ehci_readl(ehci, &ehci->regs->frame_list);
+ pdata->pm_async_next = ehci_readl(ehci, &ehci->regs->async_next);
+ pdata->pm_configured_flag =
+ ehci_readl(ehci, &ehci->regs->configured_flag);
+ pdata->pm_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]);
+ pdata->pm_usbgenctrl = ehci_readl(ehci,
+ hcd->regs + FSL_SOC_USB_USBGENCTRL);
+
+ /* clear the W1C bits */
+ pdata->pm_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS);
+
+ pdata->suspended = 1;
+
+ /* clear PP to cut power to the port */
+ tmp = ehci_readl(ehci, &ehci->regs->port_status[0]);
+ tmp &= ~PORT_POWER;
+ ehci_writel(ehci, tmp, &ehci->regs->port_status[0]);
+
+ return 0;
+}
- ehci->sbrn = 0x20;
+static int ehci_fsl_mpc512x_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
+ u32 tmp;
+
+ dev_dbg(dev, "suspend=%d already_suspended=%d\n",
+ pdata->suspended, pdata->already_suspended);
+
+ /*
+ * If the controller was already suspended at suspend time,
+ * then don't resume it now.
+ */
+ if (pdata->already_suspended) {
+ dev_dbg(dev, "already suspended, leaving early\n");
+ pdata->already_suspended = 0;
+ return 0;
+ }
+
+ if (!pdata->suspended) {
+ dev_dbg(dev, "not suspended, leaving early\n");
+ return 0;
+ }
+
+ pdata->suspended = 0;
+
+ dev_dbg(dev, "resuming...\n");
+
+ /* set host mode */
+ tmp = USBMODE_CM_HOST | (pdata->es ? USBMODE_ES : 0);
+ ehci_writel(ehci, tmp, hcd->regs + FSL_SOC_USB_USBMODE);
+
+ ehci_writel(ehci, pdata->pm_usbgenctrl,
+ hcd->regs + FSL_SOC_USB_USBGENCTRL);
+ ehci_writel(ehci, ISIPHYCTRL_PXE | ISIPHYCTRL_PHYE,
+ hcd->regs + FSL_SOC_USB_ISIPHYCTRL);
+
+ ehci_writel(ehci, SBUSCFG_INCR8, hcd->regs + FSL_SOC_USB_SBUSCFG);
+
+ /* restore EHCI registers */
+ ehci_writel(ehci, pdata->pm_command, &ehci->regs->command);
+ ehci_writel(ehci, pdata->pm_intr_enable, &ehci->regs->intr_enable);
+ ehci_writel(ehci, pdata->pm_frame_index, &ehci->regs->frame_index);
+ ehci_writel(ehci, pdata->pm_segment, &ehci->regs->segment);
+ ehci_writel(ehci, pdata->pm_frame_list, &ehci->regs->frame_list);
+ ehci_writel(ehci, pdata->pm_async_next, &ehci->regs->async_next);
+ ehci_writel(ehci, pdata->pm_configured_flag,
+ &ehci->regs->configured_flag);
+ ehci_writel(ehci, pdata->pm_portsc, &ehci->regs->port_status[0]);
+
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ ehci->rh_state = EHCI_RH_RUNNING;
+ dev->power.power_state = PMSG_ON;
+
+ tmp = ehci_readl(ehci, &ehci->regs->command);
+ tmp |= CMD_RUN;
+ ehci_writel(ehci, tmp, &ehci->regs->command);
+
+ usb_hcd_resume_root_hub(hcd);
+
+ return 0;
+}
+#else
+static inline int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
+
+static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ return container_of(ehci, struct ehci_fsl, ehci);
+}
+
+static int ehci_fsl_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ void __iomem *non_ehci = hcd->regs;
+
+ if (of_device_is_compatible(dev->parent->of_node,
+ "fsl,mpc5121-usb2-dr")) {
+ return ehci_fsl_mpc512x_drv_suspend(dev);
+ }
+
+ ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
+ device_may_wakeup(dev));
+ if (!fsl_deep_sleep())
+ return 0;
+
+ ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
+ return 0;
+}
+
+static int ehci_fsl_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ void __iomem *non_ehci = hcd->regs;
+
+ if (of_device_is_compatible(dev->parent->of_node,
+ "fsl,mpc5121-usb2-dr")) {
+ return ehci_fsl_mpc512x_drv_resume(dev);
+ }
+
+ ehci_prepare_ports_for_controller_resume(ehci);
+ if (!fsl_deep_sleep())
+ return 0;
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+
+ /* Restore USB PHY settings and enable the controller. */
+ out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
ehci_reset(ehci);
+ ehci_fsl_reinit(ehci);
- retval = ehci_fsl_reinit(ehci);
- return retval;
+ return 0;
+}
+
+static int ehci_fsl_drv_restore(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+ return 0;
}
+static struct dev_pm_ops ehci_fsl_pm_ops = {
+ .suspend = ehci_fsl_drv_suspend,
+ .resume = ehci_fsl_drv_resume,
+ .restore = ehci_fsl_drv_restore,
+};
+
+#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops)
+#else
+#define EHCI_FSL_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_OTG
+static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ u32 status;
+
+ if (!port)
+ return -EINVAL;
+
+ port--;
+
+ /* start port reset before HNP protocol time out */
+ status = readl(&ehci->regs->port_status[port]);
+ if (!(status & PORT_CONNECT))
+ return -ENODEV;
+
+ /* khubd will finish the reset later */
+ if (ehci_is_TDI(ehci)) {
+ writel(PORT_RESET |
+ (status & ~(PORT_CSC | PORT_PEC | PORT_OCC)),
+ &ehci->regs->port_status[port]);
+ } else {
+ writel(PORT_RESET, &ehci->regs->port_status[port]);
+ }
+
+ return 0;
+}
+#else
+#define ehci_start_port_reset NULL
+#endif /* CONFIG_USB_OTG */
+
+
static const struct hc_driver ehci_fsl_hc_driver = {
.description = hcd_name,
.product_desc = "Freescale On-Chip EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
+ .hcd_priv_size = sizeof(struct ehci_fsl),
/*
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2,
+ .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
/*
* basic lifecycle operations
@@ -309,6 +668,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
@@ -322,8 +682,11 @@ static const struct hc_driver ehci_fsl_hc_driver = {
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
+ .start_port_reset = ehci_start_port_reset,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
static int ehci_fsl_drv_probe(struct platform_device *pdev)
@@ -351,6 +714,8 @@ static struct platform_driver ehci_fsl_driver = {
.remove = ehci_fsl_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .name = "fsl-ehci",
+ .name = "fsl-ehci",
+ .owner = THIS_MODULE,
+ .pm = EHCI_FSL_PM_OPS,
},
};
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index b5e59db5334..dbd292e9f0a 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2005 freescale semiconductor
+/* Copyright (C) 2005-2010,2012 Freescale Semiconductor, Inc.
* Copyright (c) 2005 MontaVista Software
*
* This program is free software; you can redistribute it and/or modify it
@@ -19,6 +19,8 @@
#define _EHCI_FSL_H
/* offsets for the non-ehci registers in the FSL SOC USB controller */
+#define FSL_SOC_USB_SBUSCFG 0x90
+#define SBUSCFG_INCR8 0x02 /* INCR8, specified */
#define FSL_SOC_USB_ULPIVP 0x170
#define FSL_SOC_USB_PORTSC1 0x184
#define PORT_PTS_MSK (3<<30)
@@ -28,11 +30,36 @@
#define PORT_PTS_PTW (1<<28)
#define FSL_SOC_USB_PORTSC2 0x188
#define FSL_SOC_USB_USBMODE 0x1a8
+#define USBMODE_CM_MASK (3 << 0) /* controller mode mask */
+#define USBMODE_CM_HOST (3 << 0) /* controller mode: host */
+#define USBMODE_ES (1 << 2) /* (Big) Endian Select */
+
+#define FSL_SOC_USB_USBGENCTRL 0x200
+#define USBGENCTRL_PPP (1 << 3)
+#define USBGENCTRL_PFP (1 << 2)
+#define FSL_SOC_USB_ISIPHYCTRL 0x204
+#define ISIPHYCTRL_PXE (1)
+#define ISIPHYCTRL_PHYE (1 << 4)
+
#define FSL_SOC_USB_SNOOP1 0x400 /* NOTE: big-endian */
#define FSL_SOC_USB_SNOOP2 0x404 /* NOTE: big-endian */
#define FSL_SOC_USB_AGECNTTHRSH 0x408 /* NOTE: big-endian */
#define FSL_SOC_USB_PRICTRL 0x40c /* NOTE: big-endian */
#define FSL_SOC_USB_SICTRL 0x410 /* NOTE: big-endian */
#define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */
+#define CTRL_UTMI_PHY_EN (1<<9)
+#define CTRL_PHY_CLK_VALID (1 << 17)
#define SNOOP_SIZE_2GB 0x1e
+
+/* control Register Bit Masks */
+#define ULPI_INT_EN (1<<0)
+#define WU_INT_EN (1<<1)
+#define USB_CTRL_USB_EN (1<<2)
+#define LINE_STATE_FILTER__EN (1<<3)
+#define KEEP_OTG_ON (1<<4)
+#define OTG_PORT (1<<5)
+#define PLL_RESET (1<<8)
+#define UTMI_PHY_EN (1<<9)
+#define ULPI_PHY_CLK_SEL (1<<10)
+#define PHY_CLK_VALID (1<<17)
#endif /* _EHCI_FSL_H */
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
new file mode 100644
index 00000000000..495b6fbcbcd
--- /dev/null
+++ b/drivers/usb/host/ehci-grlib.c
@@ -0,0 +1,193 @@
+/*
+ * Driver for Aeroflex Gaisler GRLIB GRUSBHC EHCI host controller
+ *
+ * GRUSBHC is typically found on LEON/GRLIB SoCs
+ *
+ * (c) Jan Andersson <jan@gaisler.com>
+ *
+ * Based on ehci-ppc-of.c which is:
+ * (c) Valentine Barshak <vbarshak@ru.mvista.com>
+ * and in turn based on "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
+ * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/signal.h>
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#define GRUSBHC_HCIVERSION 0x0100 /* Known value of cap. reg. HCIVERSION */
+
+static const struct hc_driver ehci_grlib_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "GRLIB GRUSBHC EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+
+static int ehci_hcd_grlib_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci = NULL;
+ struct resource res;
+ u32 hc_capbase;
+ int irq;
+ int rv;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ dev_dbg(&op->dev, "initializing GRUSBHC EHCI USB Controller\n");
+
+ rv = of_address_to_resource(dn, 0, &res);
+ if (rv)
+ return rv;
+
+ /* usb_create_hcd requires dma_mask != NULL */
+ op->dev.dma_mask = &op->dev.coherent_dma_mask;
+ hcd = usb_create_hcd(&ehci_grlib_hc_driver, &op->dev,
+ "GRUSBHC EHCI USB");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res.start;
+ hcd->rsrc_len = resource_size(&res);
+
+ irq = irq_of_parse_and_map(dn, 0);
+ if (irq == NO_IRQ) {
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
+ rv = -EBUSY;
+ goto err_irq;
+ }
+
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
+ goto err_ioremap;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+
+ ehci->caps = hcd->regs;
+
+ /* determine endianness of this implementation */
+ hc_capbase = ehci_readl(ehci, &ehci->caps->hc_capbase);
+ if (HC_VERSION(ehci, hc_capbase) != GRUSBHC_HCIVERSION) {
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+ ehci->big_endian_capbase = 1;
+ }
+
+ rv = usb_add_hcd(hcd, irq, 0);
+ if (rv)
+ goto err_ioremap;
+
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+
+err_ioremap:
+ irq_dispose_mapping(irq);
+err_irq:
+ usb_put_hcd(hcd);
+
+ return rv;
+}
+
+
+static int ehci_hcd_grlib_remove(struct platform_device *op)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(op);
+
+ dev_dbg(&op->dev, "stopping GRLIB GRUSBHC EHCI USB Controller\n");
+
+ usb_remove_hcd(hcd);
+
+ irq_dispose_mapping(hcd->irq);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+
+static const struct of_device_id ehci_hcd_grlib_of_match[] = {
+ {
+ .name = "GAISLER_EHCI",
+ },
+ {
+ .name = "01_026",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match);
+
+
+static struct platform_driver ehci_grlib_driver = {
+ .probe = ehci_hcd_grlib_probe,
+ .remove = ehci_hcd_grlib_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "grlib-ehci",
+ .owner = THIS_MODULE,
+ .of_match_table = ehci_hcd_grlib_of_match,
+ },
+};
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 4725d15d096..81cda09b47e 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1,4 +1,8 @@
/*
+ * Enhanced Host Controller Interface (EHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
* Copyright (c) 2000-2004 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
@@ -23,27 +27,28 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
-#include <linux/reboot.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
-
-#include "../core/hcd.h"
+#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/unaligned.h>
+#if defined(CONFIG_PPC_PS3)
+#include <asm/firmware.h>
+#endif
+
/*-------------------------------------------------------------------------*/
/*
@@ -66,25 +71,21 @@
static const char hcd_name [] = "ehci_hcd";
-#undef VERBOSE_DEBUG
#undef EHCI_URB_TRACE
-#ifdef DEBUG
-#define EHCI_STATS
-#endif
-
/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_TT 0
#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT 1
-#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
-
-#define EHCI_IAA_MSECS 10 /* arbitrary */
-#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
-#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
-#define EHCI_SHRINK_FRAMES 5 /* async qh unlink delay */
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh = 0; // 0 to 6
@@ -97,7 +98,7 @@ module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
/* for flakey hardware, ignore overcurrent indicators */
-static int ignore_oc = 0;
+static bool ignore_oc = false;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
@@ -106,12 +107,41 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
/*-------------------------------------------------------------------------*/
#include "ehci.h"
+#include "pci-quirks.h"
+
+static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
+ struct ehci_tt *tt);
+
+/*
+ * The MosChip MCS9990 controller updates its microframe counter
+ * a little before the frame counter, and occasionally we will read
+ * an invalid intermediate value. Avoid problems by checking the
+ * microframe number (the low-order 3 bits); if they are 0 then
+ * re-read the register to get the correct value.
+ */
+static unsigned ehci_moschip_read_frame_index(struct ehci_hcd *ehci)
+{
+ unsigned uf;
+
+ uf = ehci_readl(ehci, &ehci->regs->frame_index);
+ if (unlikely((uf & 7) == 0))
+ uf = ehci_readl(ehci, &ehci->regs->frame_index);
+ return uf;
+}
+
+static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
+{
+ if (ehci->frame_index_bug)
+ return ehci_moschip_read_frame_index(ehci);
+ return ehci_readl(ehci, &ehci->regs->frame_index);
+}
+
#include "ehci-dbg.c"
/*-------------------------------------------------------------------------*/
/*
- * handshake - spin reading hc until handshake completes or fails
+ * ehci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
@@ -127,8 +157,8 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
* before driver shutdown. But it also seems to be caused by bugs in cardbus
* bridge shutdown: shutting down the bridge before the devices using it.
*/
-static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
- u32 mask, u32 done, int usec)
+int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
{
u32 result;
@@ -144,49 +174,57 @@ static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
} while (usec > 0);
return -ETIMEDOUT;
}
+EXPORT_SYMBOL_GPL(ehci_handshake);
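Illustration (outside the patch): exporting ehci_handshake() lets bus-glue code reuse the same polling helper instead of open-coding register spin loops. A minimal sketch of a call, with a hypothetical function name; PORT_RESUME and the port_status[] array are the usual definitions from ehci.h:

	/* wait up to 2 ms, polling every microsecond, for resume signaling to stop */
	static int example_wait_resume_done(struct ehci_hcd *ehci, int port)
	{
		u32 __iomem *status_reg = &ehci->regs->port_status[port];

		return ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000);
	}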
+
+/* check whether the TDI/ARC silicon is in host mode */
+static int tdi_in_host_mode (struct ehci_hcd *ehci)
+{
+ u32 tmp;
-/* force HC to halt state from unknown (EHCI spec section 2.3) */
+ tmp = ehci_readl(ehci, &ehci->regs->usbmode);
+ return (tmp & 3) == USBMODE_CM_HC;
+}
+
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
static int ehci_halt (struct ehci_hcd *ehci)
{
- u32 temp = ehci_readl(ehci, &ehci->regs->status);
+ u32 temp;
+
+ spin_lock_irq(&ehci->lock);
/* disable any irqs left enabled by previous code */
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
- if ((temp & STS_HALT) != 0)
+ if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) {
+ spin_unlock_irq(&ehci->lock);
return 0;
+ }
+ /*
+ * This routine gets called during probe before ehci->command
+ * has been initialized, so we can't rely on its value.
+ */
+ ehci->command &= ~CMD_RUN;
temp = ehci_readl(ehci, &ehci->regs->command);
- temp &= ~CMD_RUN;
+ temp &= ~(CMD_RUN | CMD_IAAD);
ehci_writel(ehci, temp, &ehci->regs->command);
- return handshake (ehci, &ehci->regs->status,
- STS_HALT, STS_HALT, 16 * 125);
-}
-static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
- u32 mask, u32 done, int usec)
-{
- int error;
-
- error = handshake(ehci, ptr, mask, done, usec);
- if (error) {
- ehci_halt(ehci);
- ehci_to_hcd(ehci)->state = HC_STATE_HALT;
- ehci_err(ehci, "force halt; handhake %p %08x %08x -> %d\n",
- ptr, mask, done, error);
- }
+ spin_unlock_irq(&ehci->lock);
+ synchronize_irq(ehci_to_hcd(ehci)->irq);
- return error;
+ return ehci_handshake(ehci, &ehci->regs->status,
+ STS_HALT, STS_HALT, 16 * 125);
}
/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
- u32 __iomem *reg_ptr;
u32 tmp;
- reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
- tmp = ehci_readl(ehci, reg_ptr);
+ tmp = ehci_readl(ehci, &ehci->regs->usbmode);
tmp |= USBMODE_CM_HC;
/* The default byte access to MMR space is LE after
* controller reset. Set the required endian mode
@@ -194,136 +232,95 @@ static void tdi_reset (struct ehci_hcd *ehci)
*/
if (ehci_big_endian_mmio(ehci))
tmp |= USBMODE_BE;
- ehci_writel(ehci, tmp, reg_ptr);
+ ehci_writel(ehci, tmp, &ehci->regs->usbmode);
}
-/* reset a non-running (STS_HALT == 1) controller */
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
static int ehci_reset (struct ehci_hcd *ehci)
{
int retval;
u32 command = ehci_readl(ehci, &ehci->regs->command);
+ /* If the EHCI debug controller is active, special care must be
+ * taken before and after a host controller reset */
+ if (ehci->debug && !dbgp_reset_prep(ehci_to_hcd(ehci)))
+ ehci->debug = NULL;
+
command |= CMD_RESET;
dbg_cmd (ehci, "reset", command);
ehci_writel(ehci, command, &ehci->regs->command);
- ehci_to_hcd(ehci)->state = HC_STATE_HALT;
+ ehci->rh_state = EHCI_RH_HALTED;
ehci->next_statechange = jiffies;
- retval = handshake (ehci, &ehci->regs->command,
+ retval = ehci_handshake(ehci, &ehci->regs->command,
CMD_RESET, 0, 250 * 1000);
+ if (ehci->has_hostpc) {
+ ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
+ &ehci->regs->usbmode_ex);
+ ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning);
+ }
if (retval)
return retval;
if (ehci_is_TDI(ehci))
tdi_reset (ehci);
+ if (ehci->debug)
+ dbgp_external_startup(ehci_to_hcd(ehci));
+
+ ehci->port_c_suspend = ehci->suspended_ports =
+ ehci->resuming_ports = 0;
return retval;
}
-/* idle the controller (from running) */
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
static void ehci_quiesce (struct ehci_hcd *ehci)
{
u32 temp;
-#ifdef DEBUG
- if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
- BUG ();
-#endif
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ return;
/* wait for any schedule enables/disables to take effect */
- temp = ehci_readl(ehci, &ehci->regs->command) << 10;
- temp &= STS_ASS | STS_PSS;
- if (handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_ASS | STS_PSS, temp, 16 * 125))
- return;
+ temp = (ehci->command << 10) & (STS_ASS | STS_PSS);
+ ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp,
+ 16 * 125);
/* then disable anything that's still active */
- temp = ehci_readl(ehci, &ehci->regs->command);
- temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
- ehci_writel(ehci, temp, &ehci->regs->command);
+ spin_lock_irq(&ehci->lock);
+ ehci->command &= ~(CMD_ASE | CMD_PSE);
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ spin_unlock_irq(&ehci->lock);
/* hardware can take 16 microframes to turn off ... */
- handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_ASS | STS_PSS, 0, 16 * 125);
+ ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0,
+ 16 * 125);
}
/*-------------------------------------------------------------------------*/
static void end_unlink_async(struct ehci_hcd *ehci);
+static void unlink_empty_async(struct ehci_hcd *ehci);
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+#include "ehci-timer.c"
#include "ehci-hub.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"
+#include "ehci-sysfs.c"
/*-------------------------------------------------------------------------*/
-static void ehci_iaa_watchdog(unsigned long param)
-{
- struct ehci_hcd *ehci = (struct ehci_hcd *) param;
- unsigned long flags;
-
- spin_lock_irqsave (&ehci->lock, flags);
-
- /* Lost IAA irqs wedge things badly; seen first with a vt8235.
- * So we need this watchdog, but must protect it against both
- * (a) SMP races against real IAA firing and retriggering, and
- * (b) clean HC shutdown, when IAA watchdog was pending.
- */
- if (ehci->reclaim
- && !timer_pending(&ehci->iaa_watchdog)
- && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
- u32 cmd, status;
-
- /* If we get here, IAA is *REALLY* late. It's barely
- * conceivable that the system is so busy that CMD_IAAD
- * is still legitimately set, so let's be sure it's
- * clear before we read STS_IAA. (The HC should clear
- * CMD_IAAD when it sets STS_IAA.)
- */
- cmd = ehci_readl(ehci, &ehci->regs->command);
- if (cmd & CMD_IAAD)
- ehci_writel(ehci, cmd & ~CMD_IAAD,
- &ehci->regs->command);
-
- /* If IAA is set here it either legitimately triggered
- * before we cleared IAAD above (but _way_ late, so we'll
- * still count it as lost) ... or a silicon erratum:
- * - VIA seems to set IAA without triggering the IRQ;
- * - IAAD potentially cleared without setting IAA.
- */
- status = ehci_readl(ehci, &ehci->regs->status);
- if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT (ehci->stats.lost_iaa);
- ehci_writel(ehci, STS_IAA, &ehci->regs->status);
- }
-
- ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
- status, cmd);
- end_unlink_async(ehci);
- }
-
- spin_unlock_irqrestore(&ehci->lock, flags);
-}
-
-static void ehci_watchdog(unsigned long param)
-{
- struct ehci_hcd *ehci = (struct ehci_hcd *) param;
- unsigned long flags;
-
- spin_lock_irqsave(&ehci->lock, flags);
-
- /* stop async processing after it's idled a bit */
- if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
- start_unlink_async (ehci, ehci->async);
-
- /* ehci could run by timer, without IRQs ... */
- ehci_work (ehci);
-
- spin_unlock_irqrestore (&ehci->lock, flags);
-}
-
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
@@ -339,11 +336,14 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
/*
* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
- * Should be called with ehci->lock held.
+ * Must be called with interrupts enabled and the lock not held.
*/
static void ehci_silence_controller(struct ehci_hcd *ehci)
{
ehci_halt(ehci);
+
+ spin_lock_irq(&ehci->lock);
+ ehci->rh_state = EHCI_RH_HALTED;
ehci_turn_off_all_ports(ehci);
/* make BIOS/etc use companion controller during reboot */
@@ -351,6 +351,7 @@ static void ehci_silence_controller(struct ehci_hcd *ehci)
/* unblock posted writes */
ehci_readl(ehci, &ehci->regs->configured_flag);
+ spin_unlock_irq(&ehci->lock);
}
/* ehci_shutdown kick in for silicon on any bus (not just pci, etc).
@@ -361,30 +362,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- del_timer_sync(&ehci->watchdog);
- del_timer_sync(&ehci->iaa_watchdog);
-
spin_lock_irq(&ehci->lock);
- ehci_silence_controller(ehci);
+ ehci->shutdown = true;
+ ehci->rh_state = EHCI_RH_STOPPING;
+ ehci->enabled_hrtimer_events = 0;
spin_unlock_irq(&ehci->lock);
-}
-static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
-{
- unsigned port;
-
- if (!HCS_PPC (ehci->hcs_params))
- return;
+ ehci_silence_controller(ehci);
- ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
- for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
- (void) ehci_hub_control(ehci_to_hcd(ehci),
- is_on ? SetPortFeature : ClearPortFeature,
- USB_PORT_FEAT_POWER,
- port--, NULL, 0);
- /* Flush those writes */
- ehci_readl(ehci, &ehci->regs->command);
- msleep(20);
+ hrtimer_cancel(&ehci->hrtimer);
}
/*-------------------------------------------------------------------------*/
@@ -395,28 +381,33 @@ static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
*/
static void ehci_work (struct ehci_hcd *ehci)
{
- timer_action_done (ehci, TIMER_IO_WATCHDOG);
-
/* another CPU may drop ehci->lock during a schedule scan while
* it reports urb completions. this flag guards against bogus
* attempts at re-entrant schedule scanning.
*/
- if (ehci->scanning)
+ if (ehci->scanning) {
+ ehci->need_rescan = true;
return;
- ehci->scanning = 1;
- scan_async (ehci);
- if (ehci->next_uframe != -1)
- scan_periodic (ehci);
- ehci->scanning = 0;
+ }
+ ehci->scanning = true;
+
+ rescan:
+ ehci->need_rescan = false;
+ if (ehci->async_count)
+ scan_async(ehci);
+ if (ehci->intr_count > 0)
+ scan_intr(ehci);
+ if (ehci->isoc_count > 0)
+ scan_isoc(ehci);
+ if (ehci->need_rescan)
+ goto rescan;
+ ehci->scanning = false;
/* the IO watchdog guards against hardware or driver bugs that
* misplace IRQs, and should let us run completely without IRQs.
* such lossage has been observed on both VT6202 and VT8235.
*/
- if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
- (ehci->async->qh_next.ptr != NULL ||
- ehci->periodic_sched != 0))
- timer_action (ehci, TIMER_IO_WATCHDOG);
+ turn_on_io_watchdog(ehci);
}
/*
@@ -429,34 +420,27 @@ static void ehci_stop (struct usb_hcd *hcd)
ehci_dbg (ehci, "stop\n");
/* no more interrupts ... */
- del_timer_sync (&ehci->watchdog);
- del_timer_sync(&ehci->iaa_watchdog);
spin_lock_irq(&ehci->lock);
- if (HC_IS_RUNNING (hcd->state))
- ehci_quiesce (ehci);
+ ehci->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&ehci->lock);
+ ehci_quiesce(ehci);
ehci_silence_controller(ehci);
ehci_reset (ehci);
- spin_unlock_irq(&ehci->lock);
- remove_companion_file(ehci);
+ hrtimer_cancel(&ehci->hrtimer);
+ remove_sysfs_files(ehci);
remove_debug_files (ehci);
/* root hub is shut down separately (first, when possible) */
spin_lock_irq (&ehci->lock);
- if (ehci->async)
- ehci_work (ehci);
+ end_free_itds(ehci);
spin_unlock_irq (&ehci->lock);
ehci_mem_cleanup (ehci);
-#ifdef EHCI_STATS
- ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
- ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
- ehci->stats.lost_iaa);
- ehci_dbg (ehci, "complete %ld unlink %ld\n",
- ehci->stats.complete, ehci->stats.unlink);
-#endif
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_dev_put();
dbg_status (ehci, "ehci_stop completed",
ehci_readl(ehci, &ehci->regs->status));
@@ -469,35 +453,59 @@ static int ehci_init(struct usb_hcd *hcd)
u32 temp;
int retval;
u32 hcc_params;
+ struct ehci_qh_hw *hw;
spin_lock_init(&ehci->lock);
- init_timer(&ehci->watchdog);
- ehci->watchdog.function = ehci_watchdog;
- ehci->watchdog.data = (unsigned long) ehci;
+ /*
+ * keep the I/O watchdog enabled by default; well-behaved HCDs can
+ * turn it off later
+ */
+ ehci->need_io_watchdog = 1;
+
+ hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ehci->hrtimer.function = ehci_hrtimer_func;
+ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
+
+ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
- init_timer(&ehci->iaa_watchdog);
- ehci->iaa_watchdog.function = ehci_iaa_watchdog;
- ehci->iaa_watchdog.data = (unsigned long) ehci;
+ /*
+ * by default set standard 80% (== 100 usec/uframe) max periodic
+ * bandwidth as required by USB 2.0
+ */
+ ehci->uframe_periodic_max = 100;
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
ehci->periodic_size = DEFAULT_I_TDPS;
+ INIT_LIST_HEAD(&ehci->async_unlink);
+ INIT_LIST_HEAD(&ehci->async_idle);
+ INIT_LIST_HEAD(&ehci->intr_unlink_wait);
+ INIT_LIST_HEAD(&ehci->intr_unlink);
+ INIT_LIST_HEAD(&ehci->intr_qh_list);
+ INIT_LIST_HEAD(&ehci->cached_itd_list);
+ INIT_LIST_HEAD(&ehci->cached_sitd_list);
+ INIT_LIST_HEAD(&ehci->tt_list);
+
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ switch (EHCI_TUNE_FLS) {
+ case 0: ehci->periodic_size = 1024; break;
+ case 1: ehci->periodic_size = 512; break;
+ case 2: ehci->periodic_size = 256; break;
+ default: BUG();
+ }
+ }
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
- hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
- ehci->i_thresh = 8;
+ ehci->i_thresh = 0;
else // N microframes cached
ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
- ehci->reclaim = NULL;
- ehci->next_uframe = -1;
-
/*
* dedicate a qh for the async ring head, since we couldn't unlink
* a 'real' qh without stopping the async schedule [4.8]. use it
@@ -506,17 +514,26 @@ static int ehci_init(struct usb_hcd *hcd)
* from automatically advancing to the next td after short reads.
*/
ehci->async->qh_next.qh = NULL;
- ehci->async->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
- ehci->async->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
- ehci->async->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
- ehci->async->hw_qtd_next = EHCI_LIST_END(ehci);
+ hw = ehci->async->hw;
+ hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
+ hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+#if defined(CONFIG_PPC_PS3)
+ hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE);
+#endif
+ hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
+ hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
- ehci->async->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
+ hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
/* clear interrupt enables, set irq latency */
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
log2_irq_thresh = 0;
temp = 1 << (16 + log2_irq_thresh);
+ if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
+ ehci->has_ppcd = 1;
+ ehci_dbg(ehci, "enable per-port change event\n");
+ temp |= CMD_PPCEE;
+ }
if (HCC_CANPARK(hcc_params)) {
/* HW default park == 3, on hardware that supports it (like
* NVidia and ALI silicon), maximizes throughput on the async
@@ -536,15 +553,12 @@ static int ehci_init(struct usb_hcd *hcd)
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
- switch (EHCI_TUNE_FLS) {
- case 0: ehci->periodic_size = 1024; break;
- case 1: ehci->periodic_size = 512; break;
- case 2: ehci->periodic_size = 256; break;
- default: BUG();
- }
}
ehci->command = temp;
+ /* Accept arbitrarily long scatter-gather lists */
+ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+ hcd->self.sg_tablesize = ~0;
return 0;
}
@@ -552,18 +566,13 @@ static int ehci_init(struct usb_hcd *hcd)
static int ehci_run (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- int retval;
u32 temp;
u32 hcc_params;
hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
/* EHCI spec section 4.1 */
- if ((retval = ehci_reset(ehci)) != 0) {
- ehci_mem_cleanup(ehci);
- return retval;
- }
+
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
@@ -584,7 +593,7 @@ static int ehci_run (struct usb_hcd *hcd)
ehci_writel(ehci, 0, &ehci->regs->segment);
#if 0
// this is deeply broken on almost all architectures
- if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK))
+ if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
ehci_info(ehci, "enabled 64bit DMA\n");
#endif
}
@@ -612,13 +621,14 @@ static int ehci_run (struct usb_hcd *hcd)
* be started before the port switching actions could complete.
*/
down_write(&ehci_cf_port_reset_rwsem);
- hcd->state = HC_STATE_RUNNING;
+ ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
up_write(&ehci_cf_port_reset_rwsem);
+ ehci->last_periodic_enable = ktime_get_real();
- temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci_info (ehci,
"USB %x.%x started, EHCI %x.%02x%s\n",
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
@@ -633,10 +643,40 @@ static int ehci_run (struct usb_hcd *hcd)
* since the class device isn't created that early.
*/
create_debug_files(ehci);
- create_companion_file(ehci);
+ create_sysfs_files(ehci);
+
+ return 0;
+}
+
+int ehci_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ ehci->regs = (void __iomem *)ehci->caps +
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ ehci->sbrn = HCD_USB2;
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ ehci_reset(ehci);
return 0;
}
+EXPORT_SYMBOL_GPL(ehci_setup);
/*-------------------------------------------------------------------------*/
@@ -645,8 +685,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 status, masked_status, pcd_status = 0, cmd;
int bh;
+ unsigned long flags;
- spin_lock (&ehci->lock);
+ /*
+ * With the threadirqs option we use the spin_lock_irqsave() variant to
+ * prevent a deadlock with the ehci hrtimer callback, because hrtimer
+ * callbacks still run in interrupt context even when threadirqs is
+ * specified. We can go back to the plain spin_lock() variant once
+ * hrtimer callbacks become threaded.
+ */
+ spin_lock_irqsave(&ehci->lock, flags);
status = ehci_readl(ehci, &ehci->regs->status);
@@ -656,9 +703,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
goto dead;
}
- masked_status = status & INTR_MASK;
- if (!masked_status) { /* irq sharing? */
- spin_unlock(&ehci->lock);
+ /*
+ * We don't use STS_FLR, but some controllers don't like it to
+ * remain on, so mask it out along with the other status bits.
+ */
+ masked_status = status & (INTR_MASK | STS_FLR);
+
+ /* Shared IRQ? */
+ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
return IRQ_NONE;
}
@@ -667,13 +720,6 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
cmd = ehci_readl(ehci, &ehci->regs->command);
bh = 0;
-#ifdef VERBOSE_DEBUG
- /* unrequested/ignored: Frame List Rollover */
- dbg_status (ehci, "irq", status);
-#endif
-
- /* INT, ERR, and IAA interrupt rates can be throttled */
-
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
@@ -685,33 +731,52 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
+
+ /* Turn off the IAA watchdog */
+ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG);
+
+ /*
+ * Mild optimization: Allow another IAAD to reset the
+ * hrtimer, if one occurs before the next expiration.
+ * In theory we could always cancel the hrtimer, but
+ * tests show that about half the time it will be reset
+ * for some other event anyway.
+ */
+ if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG)
+ ++ehci->next_hrtimer_event;
+
/* guard against (alleged) silicon errata */
- if (cmd & CMD_IAAD) {
- ehci_writel(ehci, cmd & ~CMD_IAAD,
- &ehci->regs->command);
+ if (cmd & CMD_IAAD)
ehci_dbg(ehci, "IAA with IAAD still set?\n");
- }
- if (ehci->reclaim) {
- COUNT(ehci->stats.reclaim);
- end_unlink_async(ehci);
- } else
- ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
+ if (ehci->iaa_in_progress)
+ COUNT(ehci->stats.iaa);
+ end_unlink_async(ehci);
}
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS (ehci->hcs_params);
+ u32 ppcd = ~0;
/* kick root hub later */
pcd_status = status;
/* resume root hub? */
- if (!(cmd & CMD_RUN))
+ if (ehci->rh_state == EHCI_RH_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
+ /* get per-port change detect bits */
+ if (ehci->has_ppcd)
+ ppcd = status >> 16;
+
while (i--) {
- int pstatus = ehci_readl(ehci,
- &ehci->regs->port_status [i]);
+ int pstatus;
+
+ /* leverage per-port change bits feature */
+ if (!(ppcd & (1 << i)))
+ continue;
+ pstatus = ehci_readl(ehci,
+ &ehci->regs->port_status[i]);
if (pstatus & PORT_OWNER)
continue;
@@ -724,10 +789,13 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* start 20 msec resume signaling from this port,
* and make khubd collect PORT_STAT_C_SUSPEND to
- * stop that signaling.
+ * stop that signaling. Use 5 ms extra for safety,
+ * like usb_port_resume() does.
*/
- ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
+ ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
+ set_bit(i, &ehci->resuming_ports);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+ usb_hcd_start_port_resume(&hcd->self, i);
mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
}
}
@@ -737,19 +805,24 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
ehci_err(ehci, "fatal error\n");
dbg_cmd(ehci, "fatal", cmd);
dbg_status(ehci, "fatal", status);
- ehci_halt(ehci);
dead:
- ehci_reset(ehci);
- ehci_writel(ehci, 0, &ehci->regs->configured_flag);
- /* generic layer kills/unlinks all urbs, then
- * uses ehci_stop to clean up the rest
- */
- bh = 1;
+ usb_hc_died(hcd);
+
+ /* Don't let the controller do anything more */
+ ehci->shutdown = true;
+ ehci->rh_state = EHCI_RH_STOPPING;
+ ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ ehci_handle_controller_death(ehci);
+
+ /* Handle completions when the controller stops */
+ bh = 0;
}
if (bh)
ehci_work (ehci);
- spin_unlock (&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
if (pcd_status)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
@@ -806,32 +879,6 @@ static int ehci_urb_enqueue (
}
}
-static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
-{
- /* failfast */
- if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
- end_unlink_async(ehci);
-
- /* if it's not linked then there's nothing to do */
- if (qh->qh_state != QH_STATE_LINKED)
- ;
-
- /* defer till later if busy */
- else if (ehci->reclaim) {
- struct ehci_qh *last;
-
- for (last = ehci->reclaim;
- last->reclaim;
- last = last->reclaim)
- continue;
- qh->qh_state = QH_STATE_UNLINK_WAIT;
- last->reclaim = qh;
-
- /* start IAA cycle */
- } else
- start_unlink_async (ehci, qh);
-}
-
/* remove from hardware lists
* completions normally happen asynchronously
*/
@@ -848,69 +895,34 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (rc)
goto done;
- switch (usb_pipetype (urb->pipe)) {
- // case PIPE_CONTROL:
- // case PIPE_BULK:
- default:
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ /*
+ * We don't expedite dequeue for isochronous URBs.
+ * Just wait until they complete normally or their
+ * time slot expires.
+ */
+ } else {
qh = (struct ehci_qh *) urb->hcpriv;
- if (!qh)
- break;
+ qh->exception = 1;
switch (qh->qh_state) {
case QH_STATE_LINKED:
+ if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)
+ start_unlink_intr(ehci, qh);
+ else
+ start_unlink_async(ehci, qh);
+ break;
case QH_STATE_COMPLETING:
- unlink_async(ehci, qh);
+ qh->dequeue_during_giveback = 1;
break;
case QH_STATE_UNLINK:
case QH_STATE_UNLINK_WAIT:
/* already started */
break;
case QH_STATE_IDLE:
- WARN_ON(1);
- break;
- }
- break;
-
- case PIPE_INTERRUPT:
- qh = (struct ehci_qh *) urb->hcpriv;
- if (!qh)
- break;
- switch (qh->qh_state) {
- case QH_STATE_LINKED:
- intr_deschedule (ehci, qh);
- /* FALL THROUGH */
- case QH_STATE_IDLE:
- qh_completions (ehci, qh);
+ /* QH might be waiting for a Clear-TT-Buffer */
+ qh_completions(ehci, qh);
break;
- default:
- ehci_dbg (ehci, "bogus qh %p state %d\n",
- qh, qh->qh_state);
- goto done;
}
-
- /* reschedule QH iff another request is queued */
- if (!list_empty (&qh->qtd_list)
- && HC_IS_RUNNING (hcd->state)) {
- rc = qh_schedule(ehci, qh);
-
- /* An error here likely indicates handshake failure
- * or no space left in the schedule. Neither fault
- * should happen often ...
- *
- * FIXME kill the now-dysfunctional queued urbs
- */
- if (rc != 0)
- ehci_err(ehci,
- "can't reschedule qh %p, err %d",
- qh, rc);
- }
- break;
-
- case PIPE_ISOCHRONOUS:
- // itd or sitd ...
-
- // wait till next completion, do it then.
- // completion irqs can wait up to 1024 msec,
- break;
}
done:
spin_unlock_irqrestore (&ehci->lock, flags);
@@ -926,7 +938,7 @@ ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
unsigned long flags;
- struct ehci_qh *qh, *tmp;
+ struct ehci_qh *qh;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
@@ -940,24 +952,30 @@ rescan:
/* endpoints can be iso streams. for now, we don't
* accelerate iso completions ... so spin a while.
*/
- if (qh->hw_info1 == 0) {
- ehci_vdbg (ehci, "iso delay\n");
- goto idle_timeout;
+ if (qh->hw == NULL) {
+ struct ehci_iso_stream *stream = ep->hcpriv;
+
+ if (!list_empty(&stream->td_list))
+ goto idle_timeout;
+
+ /* BUG_ON(!list_empty(&stream->free_list)); */
+ reserve_release_iso_bandwidth(ehci, stream, -1);
+ kfree(stream);
+ goto done;
}
- if (!HC_IS_RUNNING (hcd->state))
+ qh->exception = 1;
+ if (ehci->rh_state < EHCI_RH_RUNNING)
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
- for (tmp = ehci->async->qh_next.qh;
- tmp && tmp != qh;
- tmp = tmp->qh_next.qh)
- continue;
- /* periodic qh self-unlinks on empty */
- if (!tmp)
- goto nogood;
- unlink_async (ehci, qh);
+ WARN_ON(!list_empty(&qh->qtd_list));
+ if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
+ start_unlink_async(ehci, qh);
+ else
+ start_unlink_intr(ehci, qh);
/* FALL THROUGH */
+ case QH_STATE_COMPLETING: /* already in unlinking */
case QH_STATE_UNLINK: /* wait for hw to finish? */
case QH_STATE_UNLINK_WAIT:
idle_timeout:
@@ -965,13 +983,16 @@ idle_timeout:
schedule_timeout_uninterruptible(1);
goto rescan;
case QH_STATE_IDLE: /* fully unlinked */
+ if (qh->clearing_tt)
+ goto idle_timeout;
if (list_empty (&qh->qtd_list)) {
- qh_put (qh);
+ if (qh->ps.bw_uperiod)
+ reserve_release_intr_bandwidth(ehci, qh, -1);
+ qh_destroy(ehci, qh);
break;
}
/* else FALL THROUGH */
default:
-nogood:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
@@ -980,38 +1001,258 @@ nogood:
list_empty (&qh->qtd_list) ? "" : "(has tds)");
break;
}
+ done:
ep->hcpriv = NULL;
-done:
spin_unlock_irqrestore (&ehci->lock, flags);
- return;
+}
+
+static void
+ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_qh *qh;
+ int eptype = usb_endpoint_type(&ep->desc);
+ int epnum = usb_endpoint_num(&ep->desc);
+ int is_out = usb_endpoint_dir_out(&ep->desc);
+ unsigned long flags;
+
+ if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+ return;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+ qh = ep->hcpriv;
+
+ /* For Bulk and Interrupt endpoints we maintain the toggle state
+ * in the hardware; the toggle bits in udev aren't used at all.
+ * When an endpoint is reset by usb_clear_halt() we must reset
+ * the toggle bit in the QH.
+ */
+ if (qh) {
+ if (!list_empty(&qh->qtd_list)) {
+ WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+ } else {
+ /* The toggle value in the QH can't be updated
+ * while the QH is active. Unlink it now;
+ * re-linking will call qh_refresh().
+ */
+ usb_settoggle(qh->ps.udev, epnum, is_out, 0);
+ qh->exception = 1;
+ if (eptype == USB_ENDPOINT_XFER_BULK)
+ start_unlink_async(ehci, qh);
+ else
+ start_unlink_intr(ehci, qh);
+ }
+ }
+ spin_unlock_irqrestore(&ehci->lock, flags);
}
static int ehci_get_frame (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
- ehci->periodic_size;
+ return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Device addition and removal */
+
+static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ spin_lock_irq(&ehci->lock);
+ drop_tt(udev);
+ spin_unlock_irq(&ehci->lock);
}
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_PM
+
+/* suspend/resume, section 4.3 */
+
+/* These routines handle the generic parts of controller suspend/resume */
+
+int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ if (time_before(jiffies, ehci->next_statechange))
+ msleep(10);
+
+ /*
+ * Root hub was already suspended. Disable IRQ emission and
+ * mark the hardware inaccessible. The PM and USB cores make sure that
+ * the root hub is either suspended or stopped.
+ */
+ ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
+
+ spin_lock_irq(&ehci->lock);
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ (void) ehci_readl(ehci, &ehci->regs->intr_enable);
+
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ spin_unlock_irq(&ehci->lock);
+
+ synchronize_irq(hcd->irq);
+
+ /* Check for race with a wakeup request */
+ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ ehci_resume(hcd, false);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ehci_suspend);
+
+/* Returns 0 if power was preserved, 1 if power was lost */
+int ehci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ if (time_before(jiffies, ehci->next_statechange))
+ msleep(100);
+
+ /* Mark hardware accessible again as we are back to full power by now */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ if (ehci->shutdown)
+ return 0; /* Controller is dead */
+
+ /*
+ * If CF is still set and we aren't resuming from hibernation
+ * then we maintained suspend power.
+ * Just undo the effect of ehci_suspend().
+ */
+ if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
+ !hibernated) {
+ int mask = INTR_MASK;
+
+ ehci_prepare_ports_for_controller_resume(ehci);
+
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto skip;
+
+ if (!hcd->self.root_hub->do_remote_wakeup)
+ mask &= ~STS_PCD;
+ ehci_writel(ehci, mask, &ehci->regs->intr_enable);
+ ehci_readl(ehci, &ehci->regs->intr_enable);
+ skip:
+ spin_unlock_irq(&ehci->lock);
+ return 0;
+ }
+
+ /*
+ * Else reset, to cope with power loss or resume from hibernation
+ * having let the firmware kick in during reboot.
+ */
+ usb_root_hub_lost_power(hcd->self.root_hub);
+ (void) ehci_halt(ehci);
+ (void) ehci_reset(ehci);
+
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto skip;
+
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
+
+ ehci->rh_state = EHCI_RH_SUSPENDED;
+ spin_unlock_irq(&ehci->lock);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(ehci_resume);
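Illustration (outside the patch): ehci_suspend() and ehci_resume() are the generic halves that a platform glue driver can wrap in its dev_pm_ops, assuming its probe routine stored the hcd as driver data. The example_* names below are hypothetical; note that ehci_resume() returns 0/1 for power kept/lost, so the PM callback itself still returns 0:

	static int example_glue_suspend(struct device *dev)
	{
		struct usb_hcd *hcd = dev_get_drvdata(dev);

		return ehci_suspend(hcd, device_may_wakeup(dev));
	}

	static int example_glue_resume(struct device *dev)
	{
		struct usb_hcd *hcd = dev_get_drvdata(dev);

		/* false: normal resume, not return from hibernation */
		ehci_resume(hcd, false);
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(example_glue_pm_ops,
				 example_glue_suspend, example_glue_resume);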
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Generic structure: This gets copied for platform drivers so that
+ * individual entries can be overridden as needed.
+ */
+
+static const struct hc_driver ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ /*
+ * device support
+ */
+ .free_dev = ehci_remove_device,
+};
+
+void ehci_init_driver(struct hc_driver *drv,
+ const struct ehci_driver_overrides *over)
+{
+ /* Copy the generic table to drv and then apply the overrides */
+ *drv = ehci_hc_driver;
+
+ if (over) {
+ drv->hcd_priv_size += over->extra_priv_size;
+ if (over->reset)
+ drv->reset = over->reset;
+ }
+}
+EXPORT_SYMBOL_GPL(ehci_init_driver);
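Illustration (outside the patch): a bus-glue module is expected to build its hc_driver by copying the generic table through ehci_init_driver() and overriding only what differs; per the code above, .extra_priv_size and .reset are the fields honoured. A minimal sketch with hypothetical example_* names (the override .reset would typically apply SoC quirks and then call ehci_setup()):

	static struct hc_driver __read_mostly example_ehci_hc_driver;

	static const struct ehci_driver_overrides example_overrides __initconst = {
		.reset = example_glue_setup,	/* hypothetical wrapper around ehci_setup() */
	};

	static int __init example_glue_init(void)
	{
		if (usb_disabled())
			return -ENODEV;

		ehci_init_driver(&example_ehci_hc_driver, &example_overrides);
		return platform_driver_register(&example_glue_driver);
	}
	module_init(example_glue_init);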
+
+/*-------------------------------------------------------------------------*/
+
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");
-#ifdef CONFIG_PCI
-#include "ehci-pci.c"
-#define PCI_DRIVER ehci_pci_driver
-#endif
-
#ifdef CONFIG_USB_EHCI_FSL
#include "ehci-fsl.c"
#define PLATFORM_DRIVER ehci_fsl_driver
#endif
-#ifdef CONFIG_SOC_AU1200
-#include "ehci-au1xxx.c"
-#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
+#ifdef CONFIG_USB_EHCI_SH
+#include "ehci-sh.c"
+#define PLATFORM_DRIVER ehci_hcd_sh_driver
#endif
#ifdef CONFIG_PPC_PS3
@@ -1024,19 +1265,39 @@ MODULE_LICENSE ("GPL");
#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver
#endif
-#ifdef CONFIG_PLAT_ORION
-#include "ehci-orion.c"
-#define PLATFORM_DRIVER ehci_orion_driver
+#ifdef CONFIG_XPS_USB_HCD_XILINX
+#include "ehci-xilinx-of.c"
+#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
+#endif
+
+#ifdef CONFIG_USB_OCTEON_EHCI
+#include "ehci-octeon.c"
+#define PLATFORM_DRIVER ehci_octeon_driver
+#endif
+
+#ifdef CONFIG_TILE_USB
+#include "ehci-tilegx.c"
+#define PLATFORM_DRIVER ehci_hcd_tilegx_driver
#endif
-#ifdef CONFIG_ARCH_IXP4XX
-#include "ehci-ixp4xx.c"
-#define PLATFORM_DRIVER ixp4xx_ehci_driver
+#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
+#include "ehci-pmcmsp.c"
+#define PLATFORM_DRIVER ehci_hcd_msp_driver
#endif
-#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
- !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER)
-#error "missing bus glue for ehci-hcd"
+#ifdef CONFIG_SPARC_LEON
+#include "ehci-grlib.c"
+#define PLATFORM_DRIVER ehci_grlib_driver
+#endif
+
+#ifdef CONFIG_USB_EHCI_MV
+#include "ehci-mv.c"
+#define PLATFORM_DRIVER ehci_mv_driver
+#endif
+
+#ifdef CONFIG_MIPS_SEAD3
+#include "ehci-sead3.c"
+#define PLATFORM_DRIVER ehci_hcd_sead3_driver
#endif
static int __init ehci_hcd_init(void)
@@ -1058,8 +1319,8 @@ static int __init ehci_hcd_init(void)
sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
-#ifdef DEBUG
- ehci_debug_root = debugfs_create_dir("ehci", NULL);
+#ifdef CONFIG_DYNAMIC_DEBUG
+ ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
if (!ehci_debug_root) {
retval = -ENOENT;
goto err_debug;
@@ -1072,12 +1333,6 @@ static int __init ehci_hcd_init(void)
goto clean0;
#endif
-#ifdef PCI_DRIVER
- retval = pci_register_driver(&PCI_DRIVER);
- if (retval < 0)
- goto clean1;
-#endif
-
#ifdef PS3_SYSTEM_BUS_DRIVER
retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
if (retval < 0)
@@ -1085,29 +1340,35 @@ static int __init ehci_hcd_init(void)
#endif
#ifdef OF_PLATFORM_DRIVER
- retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
+ retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
goto clean3;
#endif
+
+#ifdef XILINX_OF_PLATFORM_DRIVER
+ retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
+ if (retval < 0)
+ goto clean4;
+#endif
return retval;
+#ifdef XILINX_OF_PLATFORM_DRIVER
+ /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
+clean4:
+#endif
#ifdef OF_PLATFORM_DRIVER
- /* of_unregister_platform_driver(&OF_PLATFORM_DRIVER); */
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
clean3:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
clean2:
#endif
-#ifdef PCI_DRIVER
- pci_unregister_driver(&PCI_DRIVER);
-clean1:
-#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
ehci_debug_root = NULL;
err_debug:
@@ -1119,22 +1380,21 @@ module_init(ehci_hcd_init);
static void __exit ehci_hcd_cleanup(void)
{
+#ifdef XILINX_OF_PLATFORM_DRIVER
+ platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
+#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
#endif
-#ifdef PCI_DRIVER
- pci_unregister_driver(&PCI_DRIVER);
-#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ehci_hcd_cleanup);
-
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 97a53a48a3d..cc305c71ac3 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -27,19 +27,17 @@
*/
/*-------------------------------------------------------------------------*/
+#include <linux/usb/otg.h>
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
#ifdef CONFIG_PM
-static int ehci_hub_control(
- struct usb_hcd *hcd,
- u16 typeReq,
- u16 wValue,
- u16 wIndex,
- char *buf,
- u16 wLength
-);
+static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
+{
+ return !udev->maxchild && udev->persist_enabled &&
+ udev->bus->root_hub->speed < USB_SPEED_HIGH;
+}
/* After a power loss, ports that were owned by the companion must be
* reset so that the companion can still own them.
@@ -55,9 +53,33 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
if (!ehci->owned_ports)
return;
+ /*
+ * USB 1.1 devices are mostly HIDs, which don't need to persist across
+ * suspends. If we ensure that none of our companion's devices have
+ * persist_enabled (by looking through all USB 1.1 buses in the system),
+ * we can skip this and avoid slowing resume down. Devices without
+ * persist will just get reenumerated shortly after resume anyway.
+ */
+ if (!usb_for_each_dev(NULL, persist_enabled_on_companion))
+ return;
+
+ /* Make sure the ports are powered */
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ if (test_bit(port, &ehci->owned_ports)) {
+ reg = &ehci->regs->port_status[port];
+ status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+ if (!(status & PORT_POWER)) {
+ status |= PORT_POWER;
+ ehci_writel(ehci, status, reg);
+ }
+ }
+ }
+
/* Give the connections some time to appear */
msleep(20);
+ spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
@@ -69,23 +91,30 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
clear_bit(port, &ehci->owned_ports);
else if (test_bit(port, &ehci->companion_ports))
ehci_writel(ehci, status & ~PORT_PE, reg);
- else
+ else {
+ spin_unlock_irq(&ehci->lock);
ehci_hub_control(hcd, SetPortFeature,
USB_PORT_FEAT_RESET, port + 1,
NULL, 0);
+ spin_lock_irq(&ehci->lock);
+ }
}
}
+ spin_unlock_irq(&ehci->lock);
if (!ehci->owned_ports)
return;
msleep(90); /* Wait for resets to complete */
+ spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
+ spin_unlock_irq(&ehci->lock);
ehci_hub_control(hcd, GetPortStatus,
0, port + 1,
(char *) &buf, sizeof(buf));
+ spin_lock_irq(&ehci->lock);
/* The companion should now own the port,
* but if something went wrong the port must not
@@ -104,6 +133,94 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
}
ehci->owned_ports = 0;
+ spin_unlock_irq(&ehci->lock);
+}
+
+static int ehci_port_change(struct ehci_hcd *ehci)
+{
+ int i = HCS_N_PORTS(ehci->hcs_params);
+
+ /* First check if the controller indicates a change event */
+
+ if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)
+ return 1;
+
+ /*
+ * Not all controllers appear to update this while going from D3 to D0,
+ * so check the individual port status registers as well
+ */
+
+ while (i--)
+ if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC)
+ return 1;
+
+ return 0;
+}
+
+static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
+ bool suspending, bool do_wakeup)
+{
+ int port;
+ u32 temp;
+
+ /* If remote wakeup is enabled for the root hub but disabled
+ * for the controller, we must adjust all the port wakeup flags
+ * when the controller is suspended or resumed. In all other
+ * cases they don't need to be changed.
+ */
+ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
+ return;
+
+ spin_lock_irq(&ehci->lock);
+
+ /* clear phy low-power mode before changing wakeup flags */
+ if (ehci->has_tdi_phy_lpm) {
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
+
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
+ }
+ spin_unlock_irq(&ehci->lock);
+ msleep(5);
+ spin_lock_irq(&ehci->lock);
+ }
+
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *reg = &ehci->regs->port_status[port];
+ u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+ u32 t2 = t1 & ~PORT_WAKE_BITS;
+
+ /* If we are suspending the controller, clear the flags.
+ * If we are resuming the controller, set the wakeup flags.
+ */
+ if (!suspending) {
+ if (t1 & PORT_CONNECT)
+ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+ else
+ t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+ }
+ ehci_writel(ehci, t2, reg);
+ }
+
+ /* enter phy low-power mode again */
+ if (ehci->has_tdi_phy_lpm) {
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
+
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
+ }
+ }
+
+ /* Does the root hub have a port wakeup pending? */
+ if (!suspending && ehci_port_change(ehci))
+ usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
+
+ spin_unlock_irq(&ehci->lock);
}
static int ehci_bus_suspend (struct usb_hcd *hcd)
@@ -111,24 +228,33 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int port;
int mask;
+ int changed;
+ bool fs_idle_delay;
ehci_dbg(ehci, "suspend root hub\n");
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
- del_timer_sync(&ehci->watchdog);
- del_timer_sync(&ehci->iaa_watchdog);
- port = HCS_N_PORTS (ehci->hcs_params);
+ /* stop the schedules */
+ ehci_quiesce(ehci);
+
spin_lock_irq (&ehci->lock);
+ if (ehci->rh_state < EHCI_RH_RUNNING)
+ goto done;
- /* stop schedules, clean any completed work */
- if (HC_IS_RUNNING(hcd->state)) {
- ehci_quiesce (ehci);
- hcd->state = HC_STATE_QUIESCING;
+ /* Once the controller is stopped, port resumes that are already
+ * in progress won't complete. Hence if remote wakeup is enabled
+ * for the root hub and any ports are in the middle of a resume or
+ * remote wakeup, we must fail the suspend.
+ */
+ if (hcd->self.root_hub->do_remote_wakeup) {
+ if (ehci->resuming_ports) {
+ spin_unlock_irq(&ehci->lock);
+ ehci_dbg(ehci, "suspend failed because a port is resuming\n");
+ return -EBUSY;
+ }
}
- ehci->command = ehci_readl(ehci, &ehci->regs->command);
- ehci_work(ehci);
/* Unlike other USB host controller types, EHCI doesn't have
* any notion of "global" or bus-wide suspend. The driver has
@@ -137,10 +263,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
*/
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
+ changed = 0;
+ fs_idle_delay = false;
+ port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
- u32 t2 = t1;
+ u32 t2 = t1 & ~PORT_WAKE_BITS;
/* keep track of which ports we suspend */
if (t1 & PORT_OWNER)
@@ -150,18 +279,60 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
set_bit(port, &ehci->bus_suspended);
}
- /* enable remote wakeup on all ports */
- if (hcd->self.root_hub->do_remote_wakeup)
- t2 |= PORT_WAKE_BITS;
- else
- t2 &= ~PORT_WAKE_BITS;
+ /* enable remote wakeup on all ports, if told to do so */
+ if (hcd->self.root_hub->do_remote_wakeup) {
+ /* only enable the appropriate wake bits, otherwise the
+ * hardware cannot enter PHY low-power mode. If a race
+ * occurs here (a connection change while the bits are
+ * being set), port change detection will eventually
+ * correct it.
+ */
+ if (t1 & PORT_CONNECT)
+ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+ else
+ t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+ }
if (t1 != t2) {
- ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
- port + 1, t1, t2);
+ /*
+ * On some controllers, Wake-On-Disconnect will
+ * generate false wakeup signals until the bus
+ * switches over to full-speed idle. For their
+ * sake, add a delay if we need one.
+ */
+ if ((t2 & PORT_WKDISC_E) &&
+ ehci_port_speed(ehci, t2) ==
+ USB_PORT_STAT_HIGH_SPEED)
+ fs_idle_delay = true;
ehci_writel(ehci, t2, reg);
+ changed = 1;
}
}
+ spin_unlock_irq(&ehci->lock);
+
+ if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
+ /*
+ * Wait for HCD to enter low-power mode or for the bus
+ * to switch to full-speed idle.
+ */
+ usleep_range(5000, 5500);
+ }
+
+ if (changed && ehci->has_tdi_phy_lpm) {
+ spin_lock_irq(&ehci->lock);
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
+ u32 t3;
+
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
+ port, (t3 & HOSTPC_PHCD) ?
+ "succeeded" : "failed");
+ }
+ spin_unlock_irq(&ehci->lock);
+ }
/* Apparently some devices need a >= 1-uframe delay here */
if (ehci->bus_suspended)
@@ -169,10 +340,19 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
/* turn off now-idle HC */
ehci_halt (ehci);
- hcd->state = HC_STATE_SUSPENDED;
- if (ehci->reclaim)
- end_unlink_async(ehci);
+ spin_lock_irq(&ehci->lock);
+ if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD))
+ ehci_handle_controller_death(ehci);
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ goto done;
+ ehci->rh_state = EHCI_RH_SUSPENDED;
+
+ end_unlink_async(ehci);
+ unlink_empty_async_suspended(ehci);
+ ehci_handle_start_intr_unlinks(ehci);
+ ehci_handle_intr_unlinks(ehci);
+ end_free_itds(ehci);
/* allow remote wakeup */
mask = INTR_MASK;
@@ -181,8 +361,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
ehci_readl(ehci, &ehci->regs->intr_enable);
+ done:
ehci->next_statechange = jiffies + msecs_to_jiffies(10);
+ ehci->enabled_hrtimer_events = 0;
+ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
spin_unlock_irq (&ehci->lock);
+
+ hrtimer_cancel(&ehci->hrtimer);
return 0;
}
@@ -194,14 +379,19 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
u32 temp;
u32 power_okay;
int i;
- u8 resume_needed = 0;
+ unsigned long resume_needed = 0;
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
spin_lock_irq (&ehci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
- spin_unlock_irq(&ehci->lock);
- return -ESHUTDOWN;
+ if (!HCD_HW_ACCESSIBLE(hcd) || ehci->shutdown)
+ goto shutdown;
+
+ if (unlikely(ehci->debug)) {
+ if (!dbgp_reset_prep(hcd))
+ ehci->debug = NULL;
+ else
+ dbgp_external_startup(hcd);
}
/* Ideally and we've got a real resume here, and no port's power
@@ -225,13 +415,50 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
/* restore CMD_RUN, framelist size, and irq threshold */
+ ehci->command |= CMD_RUN;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ ehci->rh_state = EHCI_RH_RUNNING;
- /* Some controller/firmware combinations need a delay during which
- * they set up the port statuses. See Bugzilla #8190. */
- spin_unlock_irq(&ehci->lock);
- msleep(8);
- spin_lock_irq(&ehci->lock);
+ /*
+ * According to Bugzilla #8190, the port status for some controllers
+ * will be wrong without a delay. In that incorrect state the port
+ * claims to be enabled, but is neither suspended nor resuming.
+ */
+ i = HCS_N_PORTS(ehci->hcs_params);
+ while (i--) {
+ temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
+ if ((temp & PORT_PE) &&
+ !(temp & (PORT_SUSPEND | PORT_RESUME))) {
+ ehci_dbg(ehci, "Port status(0x%x) is wrong\n", temp);
+ spin_unlock_irq(&ehci->lock);
+ msleep(8);
+ spin_lock_irq(&ehci->lock);
+ break;
+ }
+ }
+
+ if (ehci->shutdown)
+ goto shutdown;
+
+ /* clear phy low-power mode before resume */
+ if (ehci->bus_suspended && ehci->has_tdi_phy_lpm) {
+ i = HCS_N_PORTS(ehci->hcs_params);
+ while (i--) {
+ if (test_bit(i, &ehci->bus_suspended)) {
+ u32 __iomem *hostpc_reg =
+ &ehci->regs->hostpc[i];
+
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp & ~HOSTPC_PHCD,
+ hostpc_reg);
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+ msleep(5);
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
+ }
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS (ehci->hcs_params);
@@ -241,7 +468,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
if (test_bit(i, &ehci->bus_suspended) &&
(temp & PORT_SUSPEND)) {
temp |= PORT_RESUME;
- resume_needed = 1;
+ set_bit(i, &resume_needed);
}
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
}
@@ -251,40 +478,37 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
spin_unlock_irq(&ehci->lock);
msleep(20);
spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
}
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
- if (test_bit(i, &ehci->bus_suspended) &&
- (temp & PORT_SUSPEND)) {
- temp &= ~(PORT_RWC_BITS | PORT_RESUME);
+ if (test_bit(i, &resume_needed)) {
+ temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
- ehci_vdbg (ehci, "resumed port %d\n", i + 1);
}
}
- (void) ehci_readl(ehci, &ehci->regs->command);
-
- /* maybe re-activate the schedule(s) */
- temp = 0;
- if (ehci->async->qh_next.qh)
- temp |= CMD_ASE;
- if (ehci->periodic_sched)
- temp |= CMD_PSE;
- if (temp) {
- ehci->command |= temp;
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- }
ehci->next_statechange = jiffies + msecs_to_jiffies(5);
- hcd->state = HC_STATE_RUNNING;
+ spin_unlock_irq(&ehci->lock);
+
+ ehci_handover_companion_ports(ehci);
/* Now we can safely re-enable irqs */
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+ (void) ehci_readl(ehci, &ehci->regs->intr_enable);
+ spin_unlock_irq(&ehci->lock);
- spin_unlock_irq (&ehci->lock);
- ehci_handover_companion_ports(ehci);
return 0;
+
+ shutdown:
+ spin_unlock_irq(&ehci->lock);
+ return -ESHUTDOWN;
}
#else
@@ -296,29 +520,6 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-/* Display the ports dedicated to the companion controller */
-static ssize_t show_companion(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ehci_hcd *ehci;
- int nports, index, n;
- int count = PAGE_SIZE;
- char *ptr = buf;
-
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
- nports = HCS_N_PORTS(ehci->hcs_params);
-
- for (index = 0; index < nports; ++index) {
- if (test_bit(index, &ehci->companion_ports)) {
- n = scnprintf(ptr, count, "%d\n", index + 1);
- ptr += n;
- count -= n;
- }
- }
- return ptr - buf;
-}
-
/*
* Sets the owner of a port
*/
@@ -353,57 +554,6 @@ static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner)
}
}
-/*
- * Dedicate or undedicate a port to the companion controller.
- * Syntax is "[-]portnum", where a leading '-' sign means
- * return control of the port to the EHCI controller.
- */
-static ssize_t store_companion(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ehci_hcd *ehci;
- int portnum, new_owner;
-
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
- new_owner = PORT_OWNER; /* Owned by companion */
- if (sscanf(buf, "%d", &portnum) != 1)
- return -EINVAL;
- if (portnum < 0) {
- portnum = - portnum;
- new_owner = 0; /* Owned by EHCI */
- }
- if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
- return -ENOENT;
- portnum--;
- if (new_owner)
- set_bit(portnum, &ehci->companion_ports);
- else
- clear_bit(portnum, &ehci->companion_ports);
- set_owner(ehci, portnum, new_owner);
- return count;
-}
-static DEVICE_ATTR(companion, 0644, show_companion, store_companion);
-
-static inline void create_companion_file(struct ehci_hcd *ehci)
-{
- int i;
-
- /* with integrated TT there is no companion! */
- if (!ehci_is_TDI(ehci))
- i = device_create_file(ehci_to_hcd(ehci)->self.dev,
- &dev_attr_companion);
-}
-
-static inline void remove_companion_file(struct ehci_hcd *ehci)
-{
- /* with integrated TT there is no companion! */
- if (!ehci_is_TDI(ehci))
- device_remove_file(ehci_to_hcd(ehci)->self.dev,
- &dev_attr_companion);
-}
-
-
/*-------------------------------------------------------------------------*/
static int check_reset_complete (
@@ -438,7 +588,8 @@ static int check_reset_complete (
if (ehci->has_amcc_usb23)
set_ohci_hcfs(ehci, 1);
} else {
- ehci_dbg (ehci, "port %d high speed\n", index + 1);
+ ehci_dbg(ehci, "port %d reset complete, port enabled\n",
+ index + 1);
/* ensure 440EPx ohci controller state is suspended */
if (ehci->has_amcc_usb23)
set_ohci_hcfs(ehci, 0);
@@ -456,14 +607,11 @@ static int
ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- u32 temp, status = 0;
+ u32 temp, status;
u32 mask;
int ports, i, retval = 1;
unsigned long flags;
-
- /* if !USB_SUSPEND, root hub timers won't get shut down ... */
- if (!HC_IS_RUNNING(hcd->state))
- return 0;
+ u32 ppcd = ~0;
/* init status to no-changes */
buf [0] = 0;
@@ -473,6 +621,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
retval++;
}
+ /* Inform the core about resumes-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
+ status = ehci->resuming_ports;
+
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
* may be relevant that VIA VT8235 controllers, where PORT_POWER is
@@ -489,8 +642,17 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
/* port N changes (bit N)? */
spin_lock_irqsave (&ehci->lock, flags);
+
+ /* get per-port change detect bits */
+ if (ehci->has_ppcd)
+ ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16;
+
for (i = 0; i < ports; i++) {
- temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
+ /* leverage per-port change bits feature */
+ if (ppcd & (1 << i))
+ temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
+ else
+ temp = 0;
/*
* Return status information even for ports with OWNER set.
@@ -509,7 +671,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
status = STS_PCD;
}
}
- /* FIXME autosuspend idle root hubs */
+
+ /* If a resume is in progress, make sure it can finish */
+ if (ehci->resuming_ports)
+ mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
+
spin_unlock_irqrestore (&ehci->lock, flags);
return status ? retval : 0;
}
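
A minimal sketch of the per-port change-detect optimization used above, assuming the layout the hunk relies on (bits 16..31 of the status register mirror one change flag per port):

	/* Sketch: only read PORTSC for ports whose change bit is set. */
	u32 ppcd = ehci->has_ppcd
		? ehci_readl(ehci, &ehci->regs->status) >> 16	/* one bit per port */
		: ~0;						/* no PPCD: scan every port */

	for (i = 0; i < ports; i++) {
		if (!(ppcd & (1 << i)))
			continue;	/* nothing changed on port i */
		temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
		/* ... then test PORT_CSC / PORT_PEC / PORT_OCC as above ... */
	}
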
@@ -533,8 +699,8 @@ ehci_hub_descriptor (
desc->bDescLength = 7 + 2 * temp;
/* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset (&desc->bitmap [0], 0, temp);
- memset (&desc->bitmap [temp], 0xff, temp);
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = 0x0008; /* per-port overcurrent reporting */
if (HCS_PPC (ehci->hcs_params))
@@ -550,8 +716,147 @@ ehci_hub_descriptor (
}
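
Here temp is the byte length of each bitmap (bDescLength = 7 + 2 * temp above), so the two memsets fill the back-to-back variable-length fields of the hub descriptor. As a sketch, assuming temp == 1 for a root hub with only a few ports:

	desc->u.hs.DeviceRemovable[0] = 0x00;	/* every port: device removable */
	desc->u.hs.DeviceRemovable[1] = 0xff;	/* legacy PortPwrCtrlMask, must read as all ones */
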
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_HCD_TEST_MODE
+
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
+
+static void usb_ehset_completion(struct urb *urb)
+{
+ struct completion *done = urb->context;
+
+ complete(done);
+}
+static int submit_single_step_set_feature(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ int is_setup
+);
+
+/*
+ * Allocate and initialize a control URB. This request will be used by the
+ * EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages
+ * of the GetDescriptor request are sent 15 seconds after the SETUP stage.
+ * Return NULL if failed.
+ */
+static struct urb *request_single_step_set_feature_urb(
+ struct usb_device *udev,
+ void *dr,
+ void *buf,
+ struct completion *done
+) {
+ struct urb *urb;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ struct usb_host_endpoint *ep;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return NULL;
+
+ urb->pipe = usb_rcvctrlpipe(udev, 0);
+ ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
+ [usb_pipeendpoint(urb->pipe)];
+ if (!ep) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ urb->ep = ep;
+ urb->dev = udev;
+ urb->setup_packet = (void *)dr;
+ urb->transfer_buffer = buf;
+ urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
+ urb->complete = usb_ehset_completion;
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->transfer_flags = URB_DIR_IN;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+ urb->setup_dma = dma_map_single(
+ hcd->self.controller,
+ urb->setup_packet,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+ urb->transfer_dma = dma_map_single(
+ hcd->self.controller,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ DMA_FROM_DEVICE);
+ urb->context = done;
+ return urb;
+}
+
+static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+ int retval = -ENOMEM;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ struct usb_device *udev;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct usb_device_descriptor *buf;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ /* Obtain udev of the rhub's child port */
+ udev = usb_hub_find_child(hcd->self.root_hub, port);
+ if (!udev) {
+ ehci_err(ehci, "No device attached to the RootHub\n");
+ return -ENODEV;
+ }
+ buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!dr) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Fill Setup packet for GetDescriptor */
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+ dr->wIndex = 0;
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
+ if (!urb)
+ goto cleanup;
+
+ /* Submit just the SETUP stage */
+ retval = submit_single_step_set_feature(hcd, urb, 1);
+ if (retval)
+ goto out1;
+ if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__);
+ goto out1;
+ }
+ msleep(15 * 1000);
+
+ /* Complete remaining DATA and STATUS stages using the same URB */
+ urb->status = -EINPROGRESS;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+ retval = submit_single_step_set_feature(hcd, urb, 0);
+ if (!retval && !wait_for_completion_timeout(&done,
+ msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__);
+ }
+out1:
+ usb_free_urb(urb);
+cleanup:
+ kfree(dr);
+ kfree(buf);
+ return retval;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
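
The single-step test above is reached through SetPortFeature(USB_PORT_FEAT_TEST); as the hub_control() changes below show, the test selector travels in the high byte of wIndex and the 1-based port number in the low byte. A sketch of the encoding (the port variable is illustrative):

	u16 wValue = USB_PORT_FEAT_TEST;
	u16 wIndex = (EHSET_TEST_SINGLE_STEP_SET_FEATURE << 8) | port;
	/* hub_control() then splits it: selector = wIndex >> 8; wIndex &= 0xff; */
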
+/*-------------------------------------------------------------------------*/
-static int ehci_hub_control (
+int ehci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
@@ -563,7 +868,8 @@ static int ehci_hub_control (
int ports = HCS_N_PORTS (ehci->hcs_params);
u32 __iomem *status_reg = &ehci->regs->port_status[
(wIndex & 0xff) - 1];
- u32 temp, status;
+ u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
+ u32 temp, temp1, status;
unsigned long flags;
int retval = 0;
unsigned selector;
@@ -592,12 +898,13 @@ static int ehci_hub_control (
goto error;
wIndex--;
temp = ehci_readl(ehci, status_reg);
+ temp &= ~PORT_RWC_BITS;
/*
* Even if OWNER is set, so the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
- * USB_PORT_FEAT_C_CONNECTION).
+ * USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
@@ -605,41 +912,55 @@ static int ehci_hub_control (
ehci_writel(ehci, temp & ~PORT_PE, status_reg);
break;
case USB_PORT_FEAT_C_ENABLE:
- ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_PEC,
- status_reg);
+ ehci_writel(ehci, temp | PORT_PEC, status_reg);
break;
case USB_PORT_FEAT_SUSPEND:
if (temp & PORT_RESET)
goto error;
if (ehci->no_selective_suspend)
break;
- if (temp & PORT_SUSPEND) {
- if ((temp & PORT_PE) == 0)
- goto error;
- /* resume signaling for 20 msec */
- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
- ehci_writel(ehci, temp | PORT_RESUME,
- status_reg);
- ehci->reset_done [wIndex] = jiffies
- + msecs_to_jiffies (20);
+#ifdef CONFIG_USB_OTG
+ if ((hcd->self.otg_port == (wIndex + 1))
+ && hcd->self.b_hnp_enable) {
+ otg_start_hnp(hcd->phy->otg);
+ break;
+ }
+#endif
+ if (!(temp & PORT_SUSPEND))
+ break;
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+ /* clear phy low-power mode before resume */
+ if (ehci->has_tdi_phy_lpm) {
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
+ hostpc_reg);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ msleep(5);/* wait to leave low-power mode */
+ spin_lock_irqsave(&ehci->lock, flags);
}
+ /* resume signaling for 20 msec */
+ temp &= ~PORT_WAKE_BITS;
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ ehci->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ set_bit(wIndex, &ehci->resuming_ports);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &ehci->port_c_suspend);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC (ehci->hcs_params))
- ehci_writel(ehci,
- temp & ~(PORT_RWC_BITS | PORT_POWER),
- status_reg);
+ ehci_writel(ehci, temp & ~PORT_POWER,
+ status_reg);
break;
case USB_PORT_FEAT_C_CONNECTION:
- ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
- status_reg);
+ ehci_writel(ehci, temp | PORT_CSC, status_reg);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_OCC,
- status_reg);
+ ehci_writel(ehci, temp | PORT_OCC, status_reg);
break;
case USB_PORT_FEAT_C_RESET:
/* GetPortStatus clears reset */
@@ -667,12 +988,12 @@ static int ehci_hub_control (
// wPortChange bits
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
- status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC) && !ignore_oc){
- status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
* Hubs should disable port power on over-current.
@@ -681,55 +1002,57 @@ static int ehci_hub_control (
* power switching; they're allowed to just limit the
* current. khubd will turn the power back on.
*/
- if (HCS_PPC (ehci->hcs_params)){
+ if (((temp & PORT_OC) || (ehci->need_oc_pp_cycle))
+ && HCS_PPC(ehci->hcs_params)) {
ehci_writel(ehci,
temp & ~(PORT_RWC_BITS | PORT_POWER),
status_reg);
+ temp = ehci_readl(ehci, status_reg);
}
}
- /* whoever resumes must GetPortStatus to complete it!! */
- if (temp & PORT_RESUME) {
+ /* no reset or resume pending */
+ if (!ehci->reset_done[wIndex]) {
/* Remote Wakeup received? */
- if (!ehci->reset_done[wIndex]) {
+ if (temp & PORT_RESUME) {
/* resume signaling for 20 msec */
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
+ set_bit(wIndex, &ehci->resuming_ports);
/* check the port again */
mod_timer(&ehci_to_hcd(ehci)->rh_timer,
ehci->reset_done[wIndex]);
}
- /* resume completed? */
- else if (time_after_eq(jiffies,
- ehci->reset_done[wIndex])) {
- clear_bit(wIndex, &ehci->suspended_ports);
- set_bit(wIndex, &ehci->port_c_suspend);
- ehci->reset_done[wIndex] = 0;
+ /* reset or resume not yet complete */
+ } else if (!time_after_eq(jiffies, ehci->reset_done[wIndex])) {
+ ; /* wait until it is complete */
- /* stop resume signaling */
- temp = ehci_readl(ehci, status_reg);
- ehci_writel(ehci,
- temp & ~(PORT_RWC_BITS | PORT_RESUME),
- status_reg);
- retval = handshake(ehci, status_reg,
- PORT_RESUME, 0, 2000 /* 2msec */);
- if (retval != 0) {
- ehci_err(ehci,
- "port %d resume error %d\n",
+ /* resume completed */
+ } else if (test_bit(wIndex, &ehci->resuming_ports)) {
+ clear_bit(wIndex, &ehci->suspended_ports);
+ set_bit(wIndex, &ehci->port_c_suspend);
+ ehci->reset_done[wIndex] = 0;
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
+
+ /* stop resume signaling */
+ temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
+ ehci_writel(ehci, temp, status_reg);
+ clear_bit(wIndex, &ehci->resuming_ports);
+ retval = ehci_handshake(ehci, status_reg,
+ PORT_RESUME, 0, 2000 /* 2msec */);
+ if (retval != 0) {
+ ehci_err(ehci, "port %d resume error %d\n",
wIndex + 1, retval);
- goto error;
- }
- temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+ goto error;
}
- }
+ temp = ehci_readl(ehci, status_reg);
/* whoever resets must GetPortStatus to complete it!! */
- if ((temp & PORT_RESET)
- && time_after_eq(jiffies,
- ehci->reset_done[wIndex])) {
- status |= 1 << USB_PORT_FEAT_C_RESET;
+ } else {
+ status |= USB_PORT_STAT_C_RESET << 16;
ehci->reset_done [wIndex] = 0;
/* force reset to complete */
@@ -738,8 +1061,8 @@ static int ehci_hub_control (
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
- retval = handshake(ehci, status_reg,
- PORT_RESET, 0, 750);
+ retval = ehci_handshake(ehci, status_reg,
+ PORT_RESET, 0, 1000);
if (retval != 0) {
ehci_err (ehci, "port %d reset error %d\n",
wIndex + 1, retval);
@@ -751,9 +1074,6 @@ static int ehci_hub_control (
ehci_readl(ehci, status_reg));
}
- if (!(temp & (PORT_RESUME|PORT_RESET)))
- ehci->reset_done[wIndex] = 0;
-
/* transfer dedicated ports to the companion hc */
if ((temp & PORT_CONNECT) &&
test_bit(wIndex, &ehci->companion_ports)) {
@@ -771,36 +1091,40 @@ static int ehci_hub_control (
*/
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
// status may be from integrated TT
- status |= ehci_port_speed(ehci, temp);
+ if (ehci->has_hostpc) {
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ status |= ehci_port_speed(ehci, temp1);
+ } else
+ status |= ehci_port_speed(ehci, temp);
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
/* maybe the port was unsuspended without our knowledge */
if (temp & (PORT_SUSPEND|PORT_RESUME)) {
- status |= 1 << USB_PORT_FEAT_SUSPEND;
+ status |= USB_PORT_STAT_SUSPEND;
} else if (test_bit(wIndex, &ehci->suspended_ports)) {
clear_bit(wIndex, &ehci->suspended_ports);
+ clear_bit(wIndex, &ehci->resuming_ports);
ehci->reset_done[wIndex] = 0;
if (temp & PORT_PE)
set_bit(wIndex, &ehci->port_c_suspend);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
}
if (temp & PORT_OC)
- status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
if (test_bit(wIndex, &ehci->port_c_suspend))
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
-#ifndef VERBOSE_DEBUG
- if (status & ~0xffff) /* only if wPortChange is interesting */
-#endif
- dbg_port (ehci, "GetStatus", wIndex + 1, temp);
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+ dbg_port(ehci, "GetStatus", wIndex + 1, temp);
put_unaligned_le32(status, buf);
break;
case SetHubFeature:
@@ -816,6 +1140,15 @@ static int ehci_hub_control (
case SetPortFeature:
selector = wIndex >> 8;
wIndex &= 0xff;
+ if (unlikely(ehci->debug)) {
+ /* If the debug port is active any port
+ * feature requests should get denied */
+ if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) &&
+ (readl(&ehci->debug->control) & DBGP_ENABLED)) {
+ retval = -ENODEV;
+ goto error_exit;
+ }
+ }
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
@@ -831,7 +1164,26 @@ static int ehci_hub_control (
if ((temp & PORT_PE) == 0
|| (temp & PORT_RESET) != 0)
goto error;
+
+ /* After the check above, the port must be connected.
+ * Set the appropriate wakeup bits so the PHY can be put into
+ * low-power mode if the tdi_phy_lpm feature is present.
+ */
+ temp &= ~PORT_WKCONN_E;
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+ if (ehci->has_tdi_phy_lpm) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ msleep(5);/* 5ms for HCD enter low pwr mode */
+ spin_lock_irqsave(&ehci->lock, flags);
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp1 | HOSTPC_PHCD,
+ hostpc_reg);
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
+ wIndex, (temp1 & HOSTPC_PHCD) ?
+ "succeeded" : "failed");
+ }
set_bit(wIndex, &ehci->suspended_ports);
break;
case USB_PORT_FEAT_POWER:
@@ -840,7 +1192,7 @@ static int ehci_hub_control (
status_reg);
break;
case USB_PORT_FEAT_RESET:
- if (temp & PORT_RESUME)
+ if (temp & (PORT_SUSPEND|PORT_RESUME))
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
@@ -854,7 +1206,6 @@ static int ehci_hub_control (
wIndex + 1);
temp |= PORT_OWNER;
} else {
- ehci_vdbg (ehci, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
@@ -875,10 +1226,37 @@ static int ehci_hub_control (
* about the EHCI-specific stuff.
*/
case USB_PORT_FEAT_TEST:
+#ifdef CONFIG_USB_HCD_TEST_MODE
+ if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ retval = ehset_single_step_set_feature(hcd,
+ wIndex);
+ spin_lock_irqsave(&ehci->lock, flags);
+ break;
+ }
+#endif
if (!selector || selector > 5)
goto error;
+ spin_unlock_irqrestore(&ehci->lock, flags);
ehci_quiesce(ehci);
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ /* Put all enabled ports into suspend */
+ while (ports--) {
+ u32 __iomem *sreg =
+ &ehci->regs->port_status[ports];
+
+ temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
+ if (temp & PORT_PE)
+ ehci_writel(ehci, temp | PORT_SUSPEND,
+ sreg);
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
ehci_halt(ehci);
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ temp = ehci_readl(ehci, status_reg);
temp |= selector << 16;
ehci_writel(ehci, temp, status_reg);
break;
@@ -894,9 +1272,11 @@ error:
/* "stall" on error */
retval = -EPIPE;
}
+error_exit:
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(ehci_hub_control);
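
A note on the *_STAT_* conversions above: GetPortStatus hands back a 32-bit little-endian word whose low half is wPortStatus and whose high half is wPortChange, which is why the change bits are shifted left by 16 before the final put_unaligned_le32(). A compact sketch:

	u32 port_status = 0;
	if (temp & PORT_CONNECT)
		port_status |= USB_PORT_STAT_CONNECTION;		/* wPortStatus */
	if (temp & PORT_CSC)
		port_status |= USB_PORT_STAT_C_CONNECTION << 16;	/* wPortChange */
	put_unaligned_le32(port_status, buf);	/* little-endian on the wire */
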
static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum)
{
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
deleted file mode 100644
index 9c32063a0c2..00000000000
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * IXP4XX EHCI Host Controller Driver
- *
- * Author: Vladimir Barinov <vbarinov@embeddedalley.com>
- *
- * Based on "ehci-fsl.c" by Randy Vinson <rvinson@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/platform_device.h>
-
-static int ixp4xx_ehci_init(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval = 0;
-
- ehci->big_endian_desc = 1;
- ehci->big_endian_mmio = 1;
-
- ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100
- + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
-
- hcd->has_tt = 1;
- ehci_reset(ehci);
-
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- ehci_port_power(ehci, 0);
-
- return retval;
-}
-
-static const struct hc_driver ixp4xx_ehci_hc_driver = {
- .description = hcd_name,
- .product_desc = "IXP4XX EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
- .reset = ixp4xx_ehci_init,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .get_frame_number = ehci_get_frame,
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
-#if defined(CONFIG_PM)
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
-#endif
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-};
-
-static int ixp4xx_ehci_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- const struct hc_driver *driver = &ixp4xx_ehci_hc_driver;
- struct resource *res;
- int irq;
- int retval;
-
- if (usb_disabled())
- return -ENODEV;
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no IRQ. Check %s setup!\n",
- dev_name(&pdev->dev));
- return -ENODEV;
- }
- irq = res->start;
-
- hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd) {
- retval = -ENOMEM;
- goto fail_create_hcd;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no register addr. Check %s setup!\n",
- dev_name(&pdev->dev));
- retval = -ENODEV;
- goto fail_request_resource;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description)) {
- dev_dbg(&pdev->dev, "controller already in use\n");
- retval = -EBUSY;
- goto fail_request_resource;
- }
-
- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- retval = -EFAULT;
- goto fail_ioremap;
- }
-
- retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (retval)
- goto fail_add_hcd;
-
- return retval;
-
-fail_add_hcd:
- iounmap(hcd->regs);
-fail_ioremap:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-fail_request_resource:
- usb_put_hcd(hcd);
-fail_create_hcd:
- dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
- return retval;
-}
-
-static int ixp4xx_ehci_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-MODULE_ALIAS("platform:ixp4xx-ehci");
-
-static struct platform_driver ixp4xx_ehci_driver = {
- .probe = ixp4xx_ehci_probe,
- .remove = ixp4xx_ehci_remove,
- .driver = {
- .name = "ixp4xx-ehci",
- },
-};
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 0431397836f..c0fb6a8ae6a 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -40,7 +40,7 @@ static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
{
memset (qtd, 0, sizeof *qtd);
qtd->qtd_dma = dma;
- qtd->hw_token = cpu_to_le32 (QTD_STS_HALT);
+ qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
qtd->hw_next = EHCI_LIST_END(ehci);
qtd->hw_alt_next = EHCI_LIST_END(ehci);
INIT_LIST_HEAD (&qtd->qtd_list);
@@ -64,10 +64,8 @@ static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
}
-static void qh_destroy(struct ehci_qh *qh)
+static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- struct ehci_hcd *ehci = qh->ehci;
-
/* clean qtds first, and know this is not linked */
if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
ehci_dbg (ehci, "unused qh not empty!\n");
@@ -75,7 +73,8 @@ static void qh_destroy(struct ehci_qh *qh)
}
if (qh->dummy)
ehci_qtd_free (ehci, qh->dummy);
- dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
+ dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
+ kfree(qh);
}
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
@@ -83,40 +82,32 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
struct ehci_qh *qh;
dma_addr_t dma;
- qh = (struct ehci_qh *)
- dma_pool_alloc (ehci->qh_pool, flags, &dma);
+ qh = kzalloc(sizeof *qh, GFP_ATOMIC);
if (!qh)
- return qh;
-
- memset (qh, 0, sizeof *qh);
- qh->refcount = 1;
- qh->ehci = ehci;
+ goto done;
+ qh->hw = (struct ehci_qh_hw *)
+ dma_pool_alloc(ehci->qh_pool, flags, &dma);
+ if (!qh->hw)
+ goto fail;
+ memset(qh->hw, 0, sizeof *qh->hw);
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
+ INIT_LIST_HEAD(&qh->unlink_node);
/* dummy td enables safe urb queuing */
qh->dummy = ehci_qtd_alloc (ehci, flags);
if (qh->dummy == NULL) {
ehci_dbg (ehci, "no dummy td\n");
- dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
- qh = NULL;
+ goto fail1;
}
+done:
return qh;
-}
-
-/* to share a qh (cpu threads, or hc) */
-static inline struct ehci_qh *qh_get (struct ehci_qh *qh)
-{
- WARN_ON(!qh->refcount);
- qh->refcount++;
- return qh;
-}
-
-static inline void qh_put (struct ehci_qh *qh)
-{
- if (!--qh->refcount)
- qh_destroy(qh);
+fail1:
+ dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
+fail:
+ kfree(qh);
+ return NULL;
}
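
After this change a queue head is split into a kmalloc'ed software part and a DMA-pooled hardware part; roughly, as a sketch (see ehci.h for the full definition):

	struct ehci_qh {
		struct ehci_qh_hw *hw;		/* 32-byte aligned, from ehci->qh_pool */
		dma_addr_t qh_dma;		/* bus address of *hw */
		struct ehci_qtd *dummy;		/* dummy td enables safe urb queuing (see above) */
		struct list_head qtd_list;	/* software qtd list */
		/* ... further HCD-only bookkeeping, never seen by the controller ... */
	};
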
/*-------------------------------------------------------------------------*/
@@ -129,9 +120,13 @@ static inline void qh_put (struct ehci_qh *qh)
static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
if (ehci->async)
- qh_put (ehci->async);
+ qh_destroy(ehci, ehci->async);
ehci->async = NULL;
+ if (ehci->dummy)
+ qh_destroy(ehci, ehci->dummy);
+ ehci->dummy = NULL;
+
/* DMA consistent memory and pools */
if (ehci->qtd_pool)
dma_pool_destroy (ehci->qtd_pool);
@@ -179,7 +174,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
/* QHs for control/bulk/intr transfers */
ehci->qh_pool = dma_pool_create ("ehci_qh",
ehci_to_hcd(ehci)->self.controller,
- sizeof (struct ehci_qh),
+ sizeof(struct ehci_qh_hw),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->qh_pool) {
@@ -218,8 +213,26 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
if (ehci->periodic == NULL) {
goto fail;
}
- for (i = 0; i < ehci->periodic_size; i++)
- ehci->periodic [i] = EHCI_LIST_END(ehci);
+
+ if (ehci->use_dummy_qh) {
+ struct ehci_qh_hw *hw;
+ ehci->dummy = ehci_qh_alloc(ehci, flags);
+ if (!ehci->dummy)
+ goto fail;
+
+ hw = ehci->dummy->hw;
+ hw->hw_next = EHCI_LIST_END(ehci);
+ hw->hw_qtd_next = EHCI_LIST_END(ehci);
+ hw->hw_alt_next = EHCI_LIST_END(ehci);
+ ehci->dummy->hw = hw;
+
+ for (i = 0; i < ehci->periodic_size; i++)
+ ehci->periodic[i] = cpu_to_hc32(ehci,
+ ehci->dummy->qh_dma);
+ } else {
+ for (i = 0; i < ehci->periodic_size; i++)
+ ehci->periodic[i] = EHCI_LIST_END(ehci);
+ }
/* software shadow of hardware table */
ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
new file mode 100644
index 00000000000..982c09bebe0
--- /dev/null
+++ b/drivers/usb/host/ehci-msm.c
@@ -0,0 +1,232 @@
+/* ehci-msm.c - HSUSB Host Controller Driver Implementation
+ *
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * Partly derived from ehci-fsl.c and ehci-hcd.c
+ * Copyright (c) 2000-2004 by David Brownell
+ * Copyright (c) 2005 MontaVista Software
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#define MSM_USB_BASE (hcd->regs)
+
+#define DRIVER_DESC "Qualcomm On-Chip EHCI Host Controller"
+
+static const char hcd_name[] = "ehci-msm";
+static struct hc_driver __read_mostly msm_hc_driver;
+
+static int ehci_msm_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ ehci->caps = USB_CAPLENGTH;
+ hcd->has_tt = 1;
+
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ /* bursts of unspecified length. */
+ writel(0, USB_AHBBURST);
+ /* Use the AHB transactor */
+ writel(0, USB_AHBMODE);
+ /* Disable streaming mode and select host mode */
+ writel(0x13, USB_USBMODE);
+
+ return 0;
+}
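
The 0x13 written to USB_USBMODE above decomposes, assuming the usual ChipIdea-style layout of that register, as:

	#define USBMODE_CM_HOST	(3 << 0)	/* controller mode: host */
	#define USBMODE_SDIS	(1 << 4)	/* stream disable */

	writel(USBMODE_CM_HOST | USBMODE_SDIS, USB_USBMODE);	/* == 0x13 */
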
+
+static int ehci_msm_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res;
+ struct usb_phy *phy;
+ int ret;
+
+ dev_dbg(&pdev->dev, "ehci_msm proble\n");
+
+ hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ return -ENOMEM;
+ }
+
+ hcd->irq = platform_get_irq(pdev, 0);
+ if (hcd->irq < 0) {
+ dev_err(&pdev->dev, "Unable to get IRQ resource\n");
+ ret = hcd->irq;
+ goto put_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get memory resource\n");
+ ret = -ENODEV;
+ goto put_hcd;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto put_hcd;
+ }
+
+ /*
+ * The OTG driver takes care of PHY initialization, clock management,
+ * powering up VBUS, mapping of the register address space and power
+ * management.
+ */
+ if (pdev->dev.of_node)
+ phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
+ else
+ phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "unable to find transceiver\n");
+ ret = -EPROBE_DEFER;
+ goto put_hcd;
+ }
+
+ ret = otg_set_host(phy->otg, &hcd->self);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register with transceiver\n");
+ goto put_hcd;
+ }
+
+ hcd->phy = phy;
+ device_init_wakeup(&pdev->dev, 1);
+ /*
+ * OTG device parent of HCD takes care of putting
+ * hardware into low power mode.
+ */
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* FIXME: need to call usb_add_hcd() here? */
+
+ return 0;
+
+put_hcd:
+ usb_put_hcd(hcd);
+
+ return ret;
+}
+
+static int ehci_msm_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ device_init_wakeup(&pdev->dev, 0);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ otg_set_host(hcd->phy->otg, NULL);
+
+ /* FIXME: need to call usb_remove_hcd() here? */
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ehci_msm_pm_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ bool do_wakeup = device_may_wakeup(dev);
+
+ dev_dbg(dev, "ehci-msm PM suspend\n");
+
+ return ehci_suspend(hcd, do_wakeup);
+}
+
+static int ehci_msm_pm_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "ehci-msm PM resume\n");
+ ehci_resume(hcd, false);
+
+ return 0;
+}
+#else
+#define ehci_msm_pm_suspend NULL
+#define ehci_msm_pm_resume NULL
+#endif
+
+static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
+ .suspend = ehci_msm_pm_suspend,
+ .resume = ehci_msm_pm_resume,
+};
+
+static struct of_device_id msm_ehci_dt_match[] = {
+ { .compatible = "qcom,ehci-host", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_ehci_dt_match);
+
+static struct platform_driver ehci_msm_driver = {
+ .probe = ehci_msm_probe,
+ .remove = ehci_msm_remove,
+ .driver = {
+ .name = "msm_hsusb_host",
+ .pm = &ehci_msm_dev_pm_ops,
+ .of_match_table = msm_ehci_dt_match,
+ },
+};
+
+static const struct ehci_driver_overrides msm_overrides __initdata = {
+ .reset = ehci_msm_reset,
+};
+
+static int __init ehci_msm_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ehci_init_driver(&msm_hc_driver, &msm_overrides);
+ return platform_driver_register(&ehci_msm_driver);
+}
+module_init(ehci_msm_init);
+
+static void __exit ehci_msm_cleanup(void)
+{
+ platform_driver_unregister(&ehci_msm_driver);
+}
+module_exit(ehci_msm_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:msm-ehci");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
new file mode 100644
index 00000000000..08147c35f83
--- /dev/null
+++ b/drivers/usb/host/ehci-mv.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_data/mv_usb.h>
+
+#define CAPLENGTH_MASK (0xff)
+
+struct ehci_hcd_mv {
+ struct usb_hcd *hcd;
+
+ /* Which mode is this EHCI running in: OTG or Host? */
+ int mode;
+
+ void __iomem *phy_regs;
+ void __iomem *cap_regs;
+ void __iomem *op_regs;
+
+ struct usb_phy *otg;
+
+ struct mv_usb_platform_data *pdata;
+
+ struct clk *clk;
+};
+
+static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ clk_prepare_enable(ehci_mv->clk);
+}
+
+static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ clk_disable_unprepare(ehci_mv->clk);
+}
+
+static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ int retval;
+
+ ehci_clock_enable(ehci_mv);
+ if (ehci_mv->pdata->phy_init) {
+ retval = ehci_mv->pdata->phy_init(ehci_mv->phy_regs);
+ if (retval)
+ return retval;
+ }
+
+ return 0;
+}
+
+static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ if (ehci_mv->pdata->phy_deinit)
+ ehci_mv->pdata->phy_deinit(ehci_mv->phy_regs);
+ ehci_clock_disable(ehci_mv);
+}
+
+static int mv_ehci_reset(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
+ int retval;
+
+ if (ehci_mv == NULL) {
+ dev_err(dev, "Can not find private ehci data\n");
+ return -ENODEV;
+ }
+
+ hcd->has_tt = 1;
+
+ retval = ehci_setup(hcd);
+ if (retval)
+ dev_err(dev, "ehci_setup failed %d\n", retval);
+
+ return retval;
+}
+
+static const struct hc_driver mv_ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Marvell EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = mv_ehci_reset,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+};
+
+static int mv_ehci_probe(struct platform_device *pdev)
+{
+ struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct ehci_hcd_mv *ehci_mv;
+ struct resource *r;
+ int retval = -ENODEV;
+ u32 offset;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
+ if (!hcd)
+ return -ENOMEM;
+
+ ehci_mv = devm_kzalloc(&pdev->dev, sizeof(*ehci_mv), GFP_KERNEL);
+ if (ehci_mv == NULL) {
+ dev_err(&pdev->dev, "cannot allocate ehci_hcd_mv\n");
+ retval = -ENOMEM;
+ goto err_put_hcd;
+ }
+
+ platform_set_drvdata(pdev, ehci_mv);
+ ehci_mv->pdata = pdata;
+ ehci_mv->hcd = hcd;
+
+ ehci_mv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ehci_mv->clk)) {
+ dev_err(&pdev->dev, "error getting clock\n");
+ retval = PTR_ERR(ehci_mv->clk);
+ goto err_put_hcd;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_put_hcd;
+ }
+
+ ehci_mv->phy_regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ehci_mv->phy_regs)) {
+ retval = PTR_ERR(ehci_mv->phy_regs);
+ goto err_put_hcd;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
+ if (!r) {
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_put_hcd;
+ }
+
+ ehci_mv->cap_regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ehci_mv->cap_regs)) {
+ retval = PTR_ERR(ehci_mv->cap_regs);
+ goto err_put_hcd;
+ }
+
+ retval = mv_ehci_enable(ehci_mv);
+ if (retval) {
+ dev_err(&pdev->dev, "init phy error %d\n", retval);
+ goto err_put_hcd;
+ }
+
+ offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
+ ehci_mv->op_regs =
+ (void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
+
+ hcd->rsrc_start = r->start;
+ hcd->rsrc_len = resource_size(r);
+ hcd->regs = ehci_mv->op_regs;
+
+ hcd->irq = platform_get_irq(pdev, 0);
+ if (!hcd->irq) {
+ dev_err(&pdev->dev, "Cannot get irq.");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+
+ ehci_mv->mode = pdata->mode;
+ if (ehci_mv->mode == MV_USB_MODE_OTG) {
+ ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(ehci_mv->otg)) {
+ retval = PTR_ERR(ehci_mv->otg);
+
+ if (retval == -ENXIO)
+ dev_info(&pdev->dev, "MV_USB_MODE_OTG "
+ "must have CONFIG_USB_PHY enabled\n");
+ else
+ dev_err(&pdev->dev,
+ "unable to find transceiver\n");
+ goto err_disable_clk;
+ }
+
+ retval = otg_set_host(ehci_mv->otg->otg, &hcd->self);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "unable to register with transceiver\n");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+ /* otg will enable clock before use as host */
+ mv_ehci_disable(ehci_mv);
+ } else {
+ if (pdata->set_vbus)
+ pdata->set_vbus(1);
+
+ retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(&pdev->dev,
+ "failed to add hcd with err %d\n", retval);
+ goto err_set_vbus;
+ }
+ device_wakeup_enable(hcd->self.controller);
+ }
+
+ if (pdata->private_init)
+ pdata->private_init(ehci_mv->op_regs, ehci_mv->phy_regs);
+
+ dev_info(&pdev->dev,
+ "successful find EHCI device with regs 0x%p irq %d"
+ " working in %s mode\n", hcd->regs, hcd->irq,
+ ehci_mv->mode == MV_USB_MODE_OTG ? "OTG" : "Host");
+
+ return 0;
+
+err_set_vbus:
+ if (pdata->set_vbus)
+ pdata->set_vbus(0);
+err_disable_clk:
+ mv_ehci_disable(ehci_mv);
+err_put_hcd:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
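
The cap_regs/op_regs split in the probe above follows the standard EHCI layout: the low byte of the first capability register (CAPLENGTH) gives the offset of the operational register block. As a sketch:

	u32 hccapbase = readl(ehci_mv->cap_regs);	/* bits 7..0 = CAPLENGTH */
	ehci_mv->op_regs = (void __iomem *)
		((unsigned long)ehci_mv->cap_regs + (hccapbase & CAPLENGTH_MASK));
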
+
+static int mv_ehci_remove(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+
+ if (hcd->rh_registered)
+ usb_remove_hcd(hcd);
+
+ if (!IS_ERR_OR_NULL(ehci_mv->otg))
+ otg_set_host(ehci_mv->otg->otg, NULL);
+
+ if (ehci_mv->mode == MV_USB_MODE_HOST) {
+ if (ehci_mv->pdata->set_vbus)
+ ehci_mv->pdata->set_vbus(0);
+
+ mv_ehci_disable(ehci_mv);
+ }
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+MODULE_ALIAS("mv-ehci");
+
+static const struct platform_device_id ehci_id_table[] = {
+ {"pxa-u2oehci", PXA_U2OEHCI},
+ {"pxa-sph", PXA_SPH},
+ {"mmp3-hsic", MMP3_HSIC},
+ {"mmp3-fsic", MMP3_FSIC},
+ {},
+};
+
+static void mv_ehci_shutdown(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+
+ if (!hcd->rh_registered)
+ return;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_mv_driver = {
+ .probe = mv_ehci_probe,
+ .remove = mv_ehci_remove,
+ .shutdown = mv_ehci_shutdown,
+ .driver = {
+ .name = "mv-ehci",
+ .bus = &platform_bus_type,
+ },
+ .id_table = ehci_id_table,
+};
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
new file mode 100644
index 00000000000..dbe5e4eea08
--- /dev/null
+++ b/drivers/usb/host/ehci-mxc.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_data/usb-ehci-mxc.h>
+#include "ehci.h"
+
+#define DRIVER_DESC "Freescale On-Chip EHCI Host driver"
+
+static const char hcd_name[] = "ehci-mxc";
+
+#define ULPI_VIEWPORT_OFFSET 0x170
+
+struct ehci_mxc_priv {
+ struct clk *usbclk, *ahbclk, *phyclk;
+};
+
+static struct hc_driver __read_mostly ehci_mxc_hc_driver;
+
+static const struct ehci_driver_overrides ehci_mxc_overrides __initconst = {
+ .extra_priv_size = sizeof(struct ehci_mxc_priv),
+};
+
+static int ehci_mxc_drv_probe(struct platform_device *pdev)
+{
+ struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq, ret;
+ struct ehci_mxc_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct ehci_hcd *ehci;
+
+ if (!pdata) {
+ dev_err(dev, "No platform data given, bailing out.\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+
+ hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Found HC with no register addr. Check setup!\n");
+ ret = -ENODEV;
+ goto err_alloc;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto err_alloc;
+ }
+
+ hcd->has_tt = 1;
+ ehci = hcd_to_ehci(hcd);
+ priv = (struct ehci_mxc_priv *) ehci->priv;
+
+ /* enable clocks */
+ priv->usbclk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(priv->usbclk)) {
+ ret = PTR_ERR(priv->usbclk);
+ goto err_alloc;
+ }
+ clk_prepare_enable(priv->usbclk);
+
+ priv->ahbclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(priv->ahbclk)) {
+ ret = PTR_ERR(priv->ahbclk);
+ goto err_clk_ahb;
+ }
+ clk_prepare_enable(priv->ahbclk);
+
+ /* "dr" device has its own clock on i.MX51 */
+ priv->phyclk = devm_clk_get(&pdev->dev, "phy");
+ if (IS_ERR(priv->phyclk))
+ priv->phyclk = NULL;
+ if (priv->phyclk)
+ clk_prepare_enable(priv->phyclk);
+
+
+ /* call platform specific init function */
+ if (pdata->init) {
+ ret = pdata->init(pdev);
+ if (ret) {
+ dev_err(dev, "platform init failed\n");
+ goto err_init;
+ }
+ /* platforms need some time to settle changed IO settings */
+ mdelay(10);
+ }
+
+ /* EHCI registers start at offset 0x100 */
+ ehci->caps = hcd->regs + 0x100;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+ /* set up the PORTSCx register */
+ ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
+
+ /* is this really needed? */
+ msleep(10);
+
+ /* Initialize the transceiver */
+ if (pdata->otg) {
+ pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
+ ret = usb_phy_init(pdata->otg);
+ if (ret) {
+ dev_err(dev, "unable to init transceiver, probably missing\n");
+ ret = -ENODEV;
+ goto err_add;
+ }
+ ret = otg_set_vbus(pdata->otg->otg, 1);
+ if (ret) {
+ dev_err(dev, "unable to enable vbus on transceiver\n");
+ goto err_add;
+ }
+ }
+
+ platform_set_drvdata(pdev, hcd);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret)
+ goto err_add;
+
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+
+err_add:
+ if (pdata && pdata->exit)
+ pdata->exit(pdev);
+err_init:
+ if (priv->phyclk)
+ clk_disable_unprepare(priv->phyclk);
+
+ clk_disable_unprepare(priv->ahbclk);
+err_clk_ahb:
+ clk_disable_unprepare(priv->usbclk);
+err_alloc:
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static int ehci_mxc_drv_remove(struct platform_device *pdev)
+{
+ struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv;
+
+ usb_remove_hcd(hcd);
+
+ if (pdata && pdata->exit)
+ pdata->exit(pdev);
+
+ if (pdata && pdata->otg)
+ usb_phy_shutdown(pdata->otg);
+
+ clk_disable_unprepare(priv->usbclk);
+ clk_disable_unprepare(priv->ahbclk);
+
+ if (priv->phyclk)
+ clk_disable_unprepare(priv->phyclk);
+
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+MODULE_ALIAS("platform:mxc-ehci");
+
+static struct platform_driver ehci_mxc_driver = {
+ .probe = ehci_mxc_drv_probe,
+ .remove = ehci_mxc_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "mxc-ehci",
+ },
+};
+
+static int __init ehci_mxc_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides);
+ return platform_driver_register(&ehci_mxc_driver);
+}
+module_init(ehci_mxc_init);
+
+static void __exit ehci_mxc_cleanup(void)
+{
+ platform_driver_unregister(&ehci_mxc_driver);
+}
+module_exit(ehci_mxc_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Sascha Hauer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
new file mode 100644
index 00000000000..9051439039a
--- /dev/null
+++ b/drivers/usb/host/ehci-octeon.c
@@ -0,0 +1,188 @@
+/*
+ * EHCI HCD glue for Cavium Octeon II SOCs.
+ *
+ * Loosely based on ehci-au1xxx.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010 Cavium Networks
+ *
+ */
+
+#include <linux/platform_device.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-uctlx-defs.h>
+
+#define OCTEON_EHCI_HCD_NAME "octeon-ehci"
+
+/* Common clock init code. */
+void octeon2_usb_clocks_start(void);
+void octeon2_usb_clocks_stop(void);
+
+static void ehci_octeon_start(void)
+{
+ union cvmx_uctlx_ehci_ctl ehci_ctl;
+
+ octeon2_usb_clocks_start();
+
+ ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
+ /* Use 64-bit addressing. */
+ ehci_ctl.s.ehci_64b_addr_en = 1;
+ ehci_ctl.s.l2c_addr_msb = 0;
+ ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
+ ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+ cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);
+}
+
+static void ehci_octeon_stop(void)
+{
+ octeon2_usb_clocks_stop();
+}
+
+static const struct hc_driver ehci_octeon_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Octeon EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static u64 ehci_octeon_dma_mask = DMA_BIT_MASK(64);
+
+static int ehci_octeon_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct resource *res_mem;
+ int irq;
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No irq assigned\n");
+ return -ENODEV;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(&pdev->dev, "No register space assigned\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We can DMA from anywhere. But the descriptors must be in
+ * the lower 4GB.
+ */
+ pdev->dev.dma_mask = &ehci_octeon_dma_mask;
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto err1;
+ }
+
+ ehci_octeon_start();
+
+ ehci = hcd_to_ehci(hcd);
+
+ /* Octeon EHCI matches CPU endianness. */
+#ifdef __BIG_ENDIAN
+ ehci->big_endian_mmio = 1;
+#endif
+
+ ehci->caps = hcd->regs;
+
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ goto err2;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ platform_set_drvdata(pdev, hcd);
+
+ return 0;
+err2:
+ ehci_octeon_stop();
+
+err1:
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static int ehci_octeon_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+
+ ehci_octeon_stop();
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct platform_driver ehci_octeon_driver = {
+ .probe = ehci_octeon_drv_probe,
+ .remove = ehci_octeon_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = OCTEON_EHCI_HCD_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:" OCTEON_EHCI_HCD_NAME);
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
new file mode 100644
index 00000000000..a24720beb39
--- /dev/null
+++ b/drivers/usb/host/ehci-omap.c
@@ -0,0 +1,327 @@
+/*
+ * ehci-omap.c - driver for USBHOST on OMAP3/4 processors
+ *
+ * Bus Glue for the EHCI controllers in OMAP3/4
+ * Tested on several OMAP3 boards, and OMAP4 Pandaboard
+ *
+ * Copyright (C) 2007-2013 Texas Instruments, Inc.
+ * Author: Vikram Pandita <vikram.pandita@ti.com>
+ * Author: Anand Gadiyar <gadiyar@ti.com>
+ * Author: Keshava Munegowda <keshava_mgowda@ti.com>
+ * Author: Roger Quadros <rogerq@ti.com>
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/usb/ulpi.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+
+#include "ehci.h"
+
+#include <linux/platform_data/usb-omap.h>
+
+/* EHCI Register Set */
+#define EHCI_INSNREG04 (0xA0)
+#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
+#define EHCI_INSNREG05_ULPI (0xA4)
+#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
+#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
+#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22
+#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16
+#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
+#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
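
The INSNREG05 shifts above describe the OMAP ULPI viewport; a register write through it would be composed roughly as below (the opsel value and the port/reg/val variables are assumptions for illustration, not taken from this file):

	u32 v = (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT)	/* start transaction */
		| (port << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT)
		| (2 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT)	/* register write */
		| (reg << EHCI_INSNREG05_ULPI_REGADD_SHIFT)
		| (val << EHCI_INSNREG05_ULPI_WRDATA_SHIFT);
	ehci_write(regs, EHCI_INSNREG05_ULPI, v);
	/* then poll until the CONTROL bit clears to know the transaction finished */
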
+
+#define DRIVER_DESC "OMAP-EHCI Host Controller driver"
+
+static const char hcd_name[] = "ehci-omap";
+
+/*-------------------------------------------------------------------------*/
+
+struct omap_hcd {
+ struct usb_phy *phy[OMAP3_HS_USB_PORTS]; /* one PHY for each port */
+ int nports;
+};
+
+static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
+{
+ __raw_writel(val, base + reg);
+}
+
+static inline u32 ehci_read(void __iomem *base, u32 reg)
+{
+ return __raw_readl(base + reg);
+}
+
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+static struct hc_driver __read_mostly ehci_omap_hc_driver;
+
+static const struct ehci_driver_overrides ehci_omap_overrides __initdata = {
+ .extra_priv_size = sizeof(struct omap_hcd),
+};
+
+/**
+ * ehci_hcd_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int ehci_hcd_omap_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
+ struct resource *res;
+ struct usb_hcd *hcd;
+ void __iomem *regs;
+ int ret;
+ int irq;
+ int i;
+ struct omap_hcd *omap;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ if (!dev->parent) {
+ dev_err(dev, "Missing parent device\n");
+ return -ENODEV;
+ }
+
+	/* For DT boot, get platform data from parent, i.e. usbhshost */

+ if (dev->of_node) {
+ pdata = dev_get_platdata(dev->parent);
+ dev->platform_data = pdata;
+ }
+
+ if (!pdata) {
+ dev_err(dev, "Missing platform data\n");
+ return -ENODEV;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "EHCI irq failed\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = -ENODEV;
+ hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "Failed to create HCD\n");
+ return -ENOMEM;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = regs;
+ hcd_to_ehci(hcd)->caps = regs;
+
+ omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
+ omap->nports = pdata->nports;
+
+ platform_set_drvdata(pdev, hcd);
+
+ /* get the PHY devices if needed */
+ for (i = 0 ; i < omap->nports ; i++) {
+ struct usb_phy *phy;
+
+ /* get the PHY device */
+ if (dev->of_node)
+ phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
+ else
+ phy = devm_usb_get_phy_dev(dev, i);
+ if (IS_ERR(phy)) {
+ /* Don't bail out if PHY is not absolutely necessary */
+ if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY)
+ continue;
+
+ ret = PTR_ERR(phy);
+ dev_err(dev, "Can't get PHY device for port %d: %d\n",
+ i, ret);
+ goto err_phy;
+ }
+
+ omap->phy[i] = phy;
+
+ if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) {
+ usb_phy_init(omap->phy[i]);
+ /* bring PHY out of suspend */
+ usb_phy_set_suspend(omap->phy[i], 0);
+ }
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ /*
+	 * An undocumented "feature" in the OMAP3 EHCI controller
+ * causes suspended ports to be taken out of suspend when
+ * the USBCMD.Run/Stop bit is cleared (for example when
+ * we do ehci_bus_suspend).
+ * This breaks suspend-resume if the root-hub is allowed
+ * to suspend. Writing 1 to this undocumented register bit
+ * disables this feature and restores normal behavior.
+ */
+ ehci_write(regs, EHCI_INSNREG04,
+ EHCI_INSNREG04_DISABLE_UNSUSPEND);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret) {
+ dev_err(dev, "failed to add hcd with err %d\n", ret);
+ goto err_pm_runtime;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ /*
+ * Bring PHYs out of reset for non PHY modes.
+ * Even though HSIC mode is a PHY-less mode, the reset
+ * line exists between the chips and can be modelled
+ * as a PHY device for reset control.
+ */
+ for (i = 0; i < omap->nports; i++) {
+ if (!omap->phy[i] ||
+ pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY)
+ continue;
+
+ usb_phy_init(omap->phy[i]);
+ /* bring PHY out of suspend */
+ usb_phy_set_suspend(omap->phy[i], 0);
+ }
+
+ return 0;
+
+err_pm_runtime:
+ pm_runtime_put_sync(dev);
+
+err_phy:
+ for (i = 0; i < omap->nports; i++) {
+ if (omap->phy[i])
+ usb_phy_shutdown(omap->phy[i]);
+ }
+
+ usb_put_hcd(hcd);
+
+ return ret;
+}
+
+
+/**
+ * ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of ehci_hcd_omap_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int ehci_hcd_omap_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct omap_hcd *omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
+ int i;
+
+ usb_remove_hcd(hcd);
+
+ for (i = 0; i < omap->nports; i++) {
+ if (omap->phy[i])
+ usb_phy_shutdown(omap->phy[i]);
+ }
+
+ usb_put_hcd(hcd);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static const struct of_device_id omap_ehci_dt_ids[] = {
+ { .compatible = "ti,ehci-omap" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids);
+
+static struct platform_driver ehci_hcd_omap_driver = {
+ .probe = ehci_hcd_omap_probe,
+ .remove = ehci_hcd_omap_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ /*.suspend = ehci_hcd_omap_suspend, */
+ /*.resume = ehci_hcd_omap_resume, */
+ .driver = {
+ .name = hcd_name,
+ .of_match_table = omap_ehci_dt_ids,
+ }
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init ehci_omap_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_omap_hc_driver, &ehci_omap_overrides);
+ return platform_driver_register(&ehci_hcd_omap_driver);
+}
+module_init(ehci_omap_init);
+
+static void __exit ehci_omap_cleanup(void)
+{
+ platform_driver_unregister(&ehci_hcd_omap_driver);
+}
+module_exit(ehci_omap_cleanup);
+
+MODULE_ALIAS("platform:ehci-omap");
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 9d487908012..22e15cab8ea 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,7 +12,18 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
-#include <plat/ehci-orion.h>
+#include <linux/clk.h>
+#include <linux/platform_data/usb-ehci-orion.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+
+#include "ehci.h"
#define rdl(off) __raw_readl(hcd->regs + (off))
#define wrl(off, val) __raw_writel((val), hcd->regs + (off))
@@ -30,6 +41,19 @@
#define USB_PHY_IVREF_CTRL 0x440
#define USB_PHY_TST_GRP_CTRL 0x450
+#define DRIVER_DESC "EHCI orion driver"
+
+#define hcd_to_orion_priv(h) ((struct orion_ehci_hcd *)hcd_to_ehci(h)->priv)
+
+struct orion_ehci_hcd {
+ struct clk *clk;
+ struct phy *phy;
+};
+
+static const char hcd_name[] = "ehci-orion";
+
+static struct hc_driver __read_mostly ehci_orion_hc_driver;
+
/*
* Implement Orion USB controller specification guidelines
*/
@@ -100,75 +124,9 @@ static void orion_usb_phy_v1_setup(struct usb_hcd *hcd)
wrl(USB_MODE, 0x13);
}
-static int ehci_orion_setup(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
-
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- /*
- * data structure init
- */
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- hcd->has_tt = 1;
-
- ehci_reset(ehci);
- ehci_port_power(ehci, 0);
-
- return retval;
-}
-
-static const struct hc_driver ehci_orion_hc_driver = {
- .description = hcd_name,
- .product_desc = "Marvell Orion EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
-
- /*
- * basic lifecycle operations
- */
- .reset = ehci_orion_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-};
-
-static void __init
+static void
ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
int i;
@@ -178,7 +136,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
wrl(USB_WINDOW_CTRL(i), ((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -187,14 +145,21 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
}
-static int __init ehci_orion_drv_probe(struct platform_device *pdev)
+static const struct ehci_driver_overrides orion_overrides __initconst = {
+ .extra_priv_size = sizeof(struct orion_ehci_hcd),
+};
+
+static int ehci_orion_drv_probe(struct platform_device *pdev)
{
- struct orion_ehci_data *pd = pdev->dev.platform_data;
+ struct orion_ehci_data *pd = dev_get_platdata(&pdev->dev);
+ const struct mbus_dram_target_info *dram;
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
void __iomem *regs;
int irq, err;
+ enum orion_ehci_phy_ver phy_version;
+ struct orion_ehci_hcd *priv;
if (usb_disabled())
return -ENODEV;
@@ -207,7 +172,7 @@ static int __init ehci_orion_drv_probe(struct platform_device *pdev)
"Found HC with no IRQ. Check %s setup!\n",
dev_name(&pdev->dev));
err = -ENODEV;
- goto err1;
+ goto err;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -216,52 +181,78 @@ static int __init ehci_orion_drv_probe(struct platform_device *pdev)
"Found HC with no register addr. Check %s setup!\n",
dev_name(&pdev->dev));
err = -ENODEV;
- goto err1;
+ goto err;
}
- if (!request_mem_region(res->start, res->end - res->start + 1,
- ehci_orion_hc_driver.description)) {
- dev_dbg(&pdev->dev, "controller already in use\n");
- err = -EBUSY;
- goto err1;
- }
+ /*
+ * Right now device-tree probed devices don't get dma_mask
+ * set. Since shared usb code relies on it, set it here for
+ * now. Once we have dma capability bindings this can go away.
+ */
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err;
- regs = ioremap(res->start, res->end - res->start + 1);
- if (regs == NULL) {
- dev_dbg(&pdev->dev, "error mapping memory\n");
- err = -EFAULT;
- goto err2;
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs)) {
+ err = PTR_ERR(regs);
+ goto err;
}
hcd = usb_create_hcd(&ehci_orion_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
err = -ENOMEM;
- goto err3;
+ goto err;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100 +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
hcd->has_tt = 1;
- ehci->sbrn = 0x20;
+
+ priv = hcd_to_orion_priv(hcd);
+ /*
+ * Not all platforms can gate the clock, so it is not an error if
+	 * the clock does not exist.
+ */
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ goto err_phy_get;
+ } else {
+ err = phy_init(priv->phy);
+ if (err)
+ goto err_phy_init;
+
+ err = phy_power_on(priv->phy);
+ if (err)
+ goto err_phy_power_on;
+ }
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (pd != NULL && pd->dram != NULL)
- ehci_orion_conf_mbus_windows(hcd, pd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ ehci_orion_conf_mbus_windows(hcd, dram);
/*
* setup Orion USB controller.
*/
- switch (pd->phy_version) {
+ if (pdev->dev.of_node)
+ phy_version = EHCI_PHY_NA;
+ else
+ phy_version = pd->phy_version;
+
+ switch (phy_version) {
case EHCI_PHY_NA: /* dont change USB phy settings */
break;
case EHCI_PHY_ORION:
@@ -270,45 +261,90 @@ static int __init ehci_orion_drv_probe(struct platform_device *pdev)
case EHCI_PHY_DD:
case EHCI_PHY_KW:
default:
- printk(KERN_WARNING "Orion ehci -USB phy version isn't supported.\n");
+ dev_warn(&pdev->dev, "USB phy version isn't supported.\n");
}
- err = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
- goto err4;
+ goto err_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
return 0;
-err4:
+err_add_hcd:
+ if (!IS_ERR(priv->phy))
+ phy_power_off(priv->phy);
+err_phy_power_on:
+ if (!IS_ERR(priv->phy))
+ phy_exit(priv->phy);
+err_phy_init:
+err_phy_get:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
usb_put_hcd(hcd);
-err3:
- iounmap(regs);
-err2:
- release_mem_region(res->start, res->end - res->start + 1);
-err1:
+err:
dev_err(&pdev->dev, "init %s fail, %d\n",
dev_name(&pdev->dev), err);
return err;
}
-static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
+static int ehci_orion_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct orion_ehci_hcd *priv = hcd_to_orion_priv(hcd);
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+ if (!IS_ERR(priv->phy)) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
usb_put_hcd(hcd);
return 0;
}
-MODULE_ALIAS("platform:orion-ehci");
+static const struct of_device_id ehci_orion_dt_ids[] = {
+ { .compatible = "marvell,orion-ehci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
static struct platform_driver ehci_orion_driver = {
.probe = ehci_orion_drv_probe,
- .remove = __exit_p(ehci_orion_drv_remove),
+ .remove = ehci_orion_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
- .driver.name = "orion-ehci",
+ .driver = {
+ .name = "orion-ehci",
+ .owner = THIS_MODULE,
+ .of_match_table = ehci_orion_dt_ids,
+ },
};
+
+static int __init ehci_orion_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_orion_hc_driver, &orion_overrides);
+ return platform_driver_register(&ehci_orion_driver);
+}
+module_init(ehci_orion_init);
+
+static void __exit ehci_orion_cleanup(void)
+{
+ platform_driver_unregister(&ehci_orion_driver);
+}
+module_exit(ehci_orion_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:orion-ehci");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bb21fb0a496..3e86bf4371b 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -18,37 +18,29 @@
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#ifndef CONFIG_PCI
-#error "This file is PCI bus glue. CONFIG_PCI must be defined."
-#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+#include "pci-quirks.h"
+
+#define DRIVER_DESC "EHCI PCI platform driver"
+
+static const char hcd_name[] = "ehci-pci";
+
+/* defined here to avoid adding to pci_ids.h for single instance use */
+#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
/*-------------------------------------------------------------------------*/
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
{
- u32 temp;
int retval;
- /* optional debug port, normally in the first BAR */
- temp = pci_find_capability(pdev, 0x0a);
- if (temp) {
- pci_read_config_dword(pdev, temp, &temp);
- temp >>= 16;
- if ((temp & (3 << 13)) == (1 << 13)) {
- temp &= 0x1fff;
- ehci->debug = ehci_to_hcd(ehci)->regs + temp;
- temp = ehci_readl(ehci, &ehci->debug->control);
- ehci_info(ehci, "debug port %d%s\n",
- HCS_DEBUG_PORT(ehci->hcs_params),
- (temp & DBGP_ENABLED)
- ? " IN USE"
- : "");
- if (!(temp & DBGP_ENABLED))
- ehci->debug = NULL;
- }
- }
-
/* we expect static quirk code to handle the "extended capabilities"
* (currently just BIOS handoff) allowed starting with EHCI 0.96
*/
@@ -66,11 +58,20 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- struct pci_dev *p_smbus;
- u8 rev;
u32 temp;
int retval;
+ ehci->caps = hcd->regs;
+
+ /*
+ * ehci_init() causes memory for DMA transfers to be
+ * allocated. Thus, any vendor-specific workarounds based on
+ * limiting the type of memory used for DMA transfers must
+ * happen before ehci_setup() is called.
+ *
+ * Most other workarounds can be done either before or after
+ * init and reset; they are located here too.
+ */
switch (pdev->vendor) {
case PCI_VENDOR_ID_TOSHIBA_2:
/* celleb's companion chip */
@@ -83,20 +84,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
#endif
}
break;
- }
-
- ehci->caps = hcd->regs;
- ehci->regs = hcd->regs +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
-
- dbg_hcs_params(ehci, "reset");
- dbg_hcc_params(ehci, "reset");
-
- /* ehci_init() causes memory for DMA transfers to be
- * allocated. Thus, any vendor-specific workarounds based on
- * limiting the type of memory used for DMA transfers must
- * happen before ehci_init() is called. */
- switch (pdev->vendor) {
case PCI_VENDOR_ID_NVIDIA:
/* NVidia reports that certain chips don't handle
* QH, ITD, or SITD addresses above 2GB. (But TD,
@@ -108,50 +95,49 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
case 0x00d8: /* CK8 */
case 0x00e8: /* CK8S */
if (pci_set_consistent_dma_mask(pdev,
- DMA_31BIT_MASK) < 0)
+ DMA_BIT_MASK(31)) < 0)
ehci_warn(ehci, "can't enable NVidia "
"workaround for >2GB RAM\n");
break;
+
+ /* Some NForce2 chips have problems with selective suspend;
+ * fixed in newer silicon.
+ */
+ case 0x0068:
+ if (pdev->revision < 0xa4)
+ ehci->no_selective_suspend = 1;
+ break;
}
break;
- }
-
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
-
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- /* data structure init */
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- switch (pdev->vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB)
+ hcd->has_tt = 1;
+ break;
case PCI_VENDOR_ID_TDI:
- if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
+ if (pdev->device == PCI_DEVICE_ID_TDI_EHCI)
hcd->has_tt = 1;
- tdi_reset(ehci);
- }
break;
case PCI_VENDOR_ID_AMD:
+ /* AMD PLL quirk */
+ if (usb_amd_find_chipset_info())
+ ehci->amd_pll_fix = 1;
/* AMD8111 EHCI doesn't work, according to AMD errata */
if (pdev->device == 0x7463) {
ehci_info(ehci, "ignoring AMD8111 (errata)\n");
retval = -EIO;
goto done;
}
- break;
- case PCI_VENDOR_ID_NVIDIA:
- switch (pdev->device) {
- /* Some NForce2 chips have problems with selective suspend;
- * fixed in newer silicon.
+
+ /*
+ * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
+ * read/write memory space which does not belong to it when
+	 * there is a NULL pointer with T-bit set to 1 in the frame list
+	 * table. To avoid the issue, the frame list link pointer
+	 * should always contain a valid pointer to an inactive qh.
*/
- case 0x0068:
- if (pdev->revision < 0xa4)
- ehci->no_selective_suspend = 1;
- break;
+ if (pdev->device == 0x7808) {
+ ehci->use_dummy_qh = 1;
+ ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
}
break;
case PCI_VENDOR_ID_VIA:
@@ -169,30 +155,86 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
}
break;
case PCI_VENDOR_ID_ATI:
+ /* AMD PLL quirk */
+ if (usb_amd_find_chipset_info())
+ ehci->amd_pll_fix = 1;
+
+ /*
+ * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
+ * read/write memory space which does not belong to it when
+	 * there is a NULL pointer with T-bit set to 1 in the frame list
+	 * table. To avoid the issue, the frame list link pointer
+	 * should always contain a valid pointer to an inactive qh.
+ */
+ if (pdev->device == 0x4396) {
+ ehci->use_dummy_qh = 1;
+ ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
+ }
/* SB600 and old version of SB700 have a bug in EHCI controller,
* which causes usb devices lose response in some cases.
*/
- if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
- p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
- PCI_DEVICE_ID_ATI_SBX00_SMBUS,
- NULL);
- if (!p_smbus)
- break;
- rev = p_smbus->revision;
- if ((pdev->device == 0x4386) || (rev == 0x3a)
- || (rev == 0x3b)) {
- u8 tmp;
- ehci_info(ehci, "applying AMD SB600/SB700 USB "
- "freeze workaround\n");
- pci_read_config_byte(pdev, 0x53, &tmp);
- pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
- }
- pci_dev_put(p_smbus);
+ if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
+ usb_amd_hang_symptom_quirk()) {
+ u8 tmp;
+ ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
+ pci_read_config_byte(pdev, 0x53, &tmp);
+ pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
}
break;
+ case PCI_VENDOR_ID_NETMOS:
+ /* MosChip frame-index-register bug */
+ ehci_info(ehci, "applying MosChip frame-index workaround\n");
+ ehci->frame_index_bug = 1;
+ break;
+ }
+
+ /* optional debug port, normally in the first BAR */
+ temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
+ if (temp) {
+ pci_read_config_dword(pdev, temp, &temp);
+ temp >>= 16;
+ if (((temp >> 13) & 7) == 1) {
+ u32 hcs_params = ehci_readl(ehci,
+ &ehci->caps->hcs_params);
+
+ temp &= 0x1fff;
+ ehci->debug = hcd->regs + temp;
+ temp = ehci_readl(ehci, &ehci->debug->control);
+ ehci_info(ehci, "debug port %d%s\n",
+ HCS_DEBUG_PORT(hcs_params),
+ (temp & DBGP_ENABLED) ? " IN USE" : "");
+ if (!(temp & DBGP_ENABLED))
+ ehci->debug = NULL;
+ }
}
- ehci_reset(ehci);
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ /* These workarounds need to be applied after ehci_setup() */
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_NEC:
+ ehci->need_io_watchdog = 0;
+ break;
+ case PCI_VENDOR_ID_INTEL:
+ ehci->need_io_watchdog = 0;
+ break;
+ case PCI_VENDOR_ID_NVIDIA:
+ switch (pdev->device) {
+ /* MCP89 chips on the MacBookAir3,1 give EPROTO when
+ * fetching device descriptors unless LPM is disabled.
+ * There are also intermittent problems enumerating
+ * devices with PPCD enabled.
+ */
+ case 0x0d9d:
+ ehci_info(ehci, "disable ppcd for nvidia mcp89\n");
+ ehci->has_ppcd = 0;
+ ehci->command &= ~CMD_PPCEE;
+ break;
+ }
+ break;
+ }
/* at least the Genesys GL880S needs fixup here */
temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
@@ -217,7 +259,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
}
/* Serial Bus Release Number is at PCI 0x60 offset */
- pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
+ if (pdev->vendor == PCI_VENDOR_ID_STMICRO
+ && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
+ ; /* ConneXT has no sbrn register */
+ else
+ pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
/* Keep this around for a while just in case some EHCI
* implementation uses legacy PCI PM support. This test
@@ -234,22 +280,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
}
}
-#ifdef CONFIG_USB_SUSPEND
- /* REVISIT: the controller works fine for wakeup iff the root hub
- * itself is "globally" suspended, but usbcore currently doesn't
- * understand such things.
- *
- * System suspend currently expects to be able to suspend the entire
- * device tree, device-at-a-time. If we failed selective suspend
- * reports, system suspend would fail; so the root hub code must claim
- * success. That's lying to usbcore, and it matters for for runtime
- * PM scenarios with selective suspend and remote wakeup...
- */
+#ifdef CONFIG_PM_RUNTIME
if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif
- ehci_port_power(ehci, 1);
retval = ehci_pci_reinit(ehci, pdev);
done:
return retval;
@@ -268,146 +303,26 @@ done:
* Also they depend on separate root hub suspend/resume.
*/
-static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- unsigned long flags;
- int rc = 0;
-
- if (time_before(jiffies, ehci->next_statechange))
- msleep(10);
-
- /* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
- */
- spin_lock_irqsave (&ehci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
- ehci_writel(ehci, 0, &ehci->regs->intr_enable);
- (void)ehci_readl(ehci, &ehci->regs->intr_enable);
-
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW) {
- ehci_halt(ehci);
- ehci_reset(ehci);
- }
-
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- bail:
- spin_unlock_irqrestore (&ehci->lock, flags);
-
- // could save FLADJ in case of Vaux power loss
- // ... we'd only use it to handle clock skew
-
- return rc;
-}
-
-static int ehci_pci_resume(struct usb_hcd *hcd)
+static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- // maybe restore FLADJ
-
- if (time_before(jiffies, ehci->next_statechange))
- msleep(100);
-
- /* Mark hardware accessible again as we are out of D3 state by now */
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- /* If CF is still set, we maintained PCI Vaux power.
- * Just undo the effect of ehci_pci_suspend().
- */
- if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
- int mask = INTR_MASK;
-
- if (!hcd->self.root_hub->do_remote_wakeup)
- mask &= ~STS_PCD;
- ehci_writel(ehci, mask, &ehci->regs->intr_enable);
- ehci_readl(ehci, &ehci->regs->intr_enable);
- return 0;
- }
-
- ehci_dbg(ehci, "lost power, restarting\n");
- usb_root_hub_lost_power(hcd->self.root_hub);
-
- /* Else reset, to cope with power loss or flush-to-storage
- * style "resume" having let BIOS kick in during reboot.
- */
- (void) ehci_halt(ehci);
- (void) ehci_reset(ehci);
- (void) ehci_pci_reinit(ehci, pdev);
-
- /* emptying the schedule aborts any urbs */
- spin_lock_irq(&ehci->lock);
- if (ehci->reclaim)
- end_unlink_async(ehci);
- ehci_work(ehci);
- spin_unlock_irq(&ehci->lock);
-
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
- ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
-
- /* here we "know" root ports should always stay powered */
- ehci_port_power(ehci, 1);
-
- hcd->state = HC_STATE_SUSPENDED;
+ if (ehci_resume(hcd, hibernated) != 0)
+ (void) ehci_pci_reinit(ehci, pdev);
return 0;
}
-#endif
-
-static const struct hc_driver ehci_pci_hc_driver = {
- .description = hcd_name,
- .product_desc = "EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
- /*
- * basic lifecycle operations
- */
- .reset = ehci_pci_setup,
- .start = ehci_run,
-#ifdef CONFIG_PM
- .pci_suspend = ehci_pci_suspend,
- .pci_resume = ehci_pci_resume,
-#endif
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
+#else
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
+#define ehci_suspend NULL
+#define ehci_pci_resume NULL
+#endif /* CONFIG_PM */
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
+static struct hc_driver __read_mostly ehci_pci_hc_driver;
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
+static const struct ehci_driver_overrides pci_overrides __initconst = {
+ .reset = ehci_pci_setup,
};
/*-------------------------------------------------------------------------*/
@@ -417,6 +332,9 @@ static const struct pci_device_id pci_ids [] = { {
/* handle any USB 2.0 EHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
.driver_data = (unsigned long) &ehci_pci_hc_driver,
+ }, {
+ PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST),
+ .driver_data = (unsigned long) &ehci_pci_hc_driver,
},
{ /* end: all zeroes */ }
};
@@ -429,11 +347,39 @@ static struct pci_driver ehci_pci_driver = {
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM
- .suspend = usb_hcd_pci_suspend,
- .resume_early = usb_hcd_pci_resume_early,
- .resume = usb_hcd_pci_resume,
+#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
#endif
- .shutdown = usb_hcd_pci_shutdown,
};
+
+static int __init ehci_pci_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides);
+
+ /* Entries for the PCI suspend/resume callbacks are special */
+ ehci_pci_hc_driver.pci_suspend = ehci_suspend;
+ ehci_pci_hc_driver.pci_resume = ehci_pci_resume;
+
+ return pci_register_driver(&ehci_pci_driver);
+}
+module_init(ehci_pci_init);
+
+static void __exit ehci_pci_cleanup(void)
+{
+ pci_unregister_driver(&ehci_pci_driver);
+}
+module_exit(ehci_pci_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Brownell");
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
new file mode 100644
index 00000000000..2f5b9ce3e04
--- /dev/null
+++ b/drivers/usb/host/ehci-platform.c
@@ -0,0 +1,410 @@
+/*
+ * Generic platform ehci driver
+ *
+ * Copyright 2007 Steven Brown <sbrown@cortland.com>
+ * Copyright 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Derived from the ohci-ssb driver
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ *
+ * Derived from the EHCI-PCI driver
+ * Copyright (c) 2000-2004 by David Brownell
+ *
+ * Derived from the ohci-pci driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ehci_pdriver.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI generic platform driver"
+#define EHCI_MAX_CLKS 3
+#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
+
+struct ehci_platform_priv {
+ struct clk *clks[EHCI_MAX_CLKS];
+ struct reset_control *rst;
+ struct phy *phy;
+};
+
+static const char hcd_name[] = "ehci-platform";
+
+static int ehci_platform_reset(struct usb_hcd *hcd)
+{
+ struct platform_device *pdev = to_platform_device(hcd->self.controller);
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ hcd->has_tt = pdata->has_tt;
+ ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
+
+ if (pdata->pre_setup) {
+ retval = pdata->pre_setup(hcd);
+ if (retval < 0)
+ return retval;
+ }
+
+ ehci->caps = hcd->regs + pdata->caps_offset;
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ if (pdata->no_io_watchdog)
+ ehci->need_io_watchdog = 0;
+ return 0;
+}
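The pdata->pre_setup() hook above runs after the registers are mapped but before ehci_setup() reads the capability registers, so a platform can do a last-minute controller fixup there. A rough sketch of such a hook follows; the 0x310 offset is a placeholder, not a real register.

static int my_pre_setup(struct usb_hcd *hcd)
{
	/* example only: clear a (hypothetical) vendor bypass-mode register */
	writel(0, hcd->regs + 0x310);
	return 0;
}

static struct usb_ehci_pdata my_fixup_pdata = {
	.caps_offset	= 0,
	.pre_setup	= my_pre_setup,
};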
+
+static int ehci_platform_power_on(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk, ret;
+
+ for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++) {
+ ret = clk_prepare_enable(priv->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ if (priv->phy) {
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto err_disable_clks;
+
+ ret = phy_power_on(priv->phy);
+ if (ret)
+ goto err_exit_phy;
+ }
+
+ return 0;
+
+err_exit_phy:
+ phy_exit(priv->phy);
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(priv->clks[clk]);
+
+ return ret;
+}
+
+static void ehci_platform_power_off(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk;
+
+ if (priv->phy) {
+ phy_power_off(priv->phy);
+ phy_exit(priv->phy);
+ }
+
+ for (clk = EHCI_MAX_CLKS - 1; clk >= 0; clk--)
+ if (priv->clks[clk])
+ clk_disable_unprepare(priv->clks[clk]);
+}
+
+static struct hc_driver __read_mostly ehci_platform_hc_driver;
+
+static const struct ehci_driver_overrides platform_overrides __initconst = {
+ .reset = ehci_platform_reset,
+ .extra_priv_size = sizeof(struct ehci_platform_priv),
+};
+
+static struct usb_ehci_pdata ehci_platform_defaults = {
+ .power_on = ehci_platform_power_on,
+ .power_suspend = ehci_platform_power_off,
+ .power_off = ehci_platform_power_off,
+};
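When there is no device tree, a board can hand this generic driver its controller by registering an "ehci-platform" device together with a struct usb_ehci_pdata. A rough sketch, assuming platform_device_register_resndata(); the base address, size and IRQ number below are placeholders.

static struct resource board_ehci_res[] = {
	DEFINE_RES_MEM(0x1c014000, 0x1000),	/* placeholder controller base */
	DEFINE_RES_IRQ(42),			/* placeholder interrupt */
};

static struct usb_ehci_pdata board_ehci_pdata = {
	.caps_offset	= 0,
	.has_tt		= 1,	/* controller has an internal transaction translator */
};

static int __init board_add_ehci(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_resndata(NULL, "ehci-platform", -1,
			board_ehci_res, ARRAY_SIZE(board_ehci_res),
			&board_ehci_pdata, sizeof(board_ehci_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}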
+
+static int ehci_platform_probe(struct platform_device *dev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res_mem;
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ehci_platform_priv *priv;
+ struct ehci_hcd *ehci;
+ int err, irq, clk = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Use reasonable defaults so platforms don't have to provide these
+ * with DT probing on ARM.
+ */
+ if (!pdata)
+ pdata = &ehci_platform_defaults;
+
+ err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ dev_err(&dev->dev, "no irq provided");
+ return irq;
+ }
+ res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res_mem) {
+ dev_err(&dev->dev, "no memory resource provided");
+ return -ENXIO;
+ }
+
+ hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, hcd);
+ dev->dev.platform_data = pdata;
+ priv = hcd_to_ehci_priv(hcd);
+ ehci = hcd_to_ehci(hcd);
+
+ if (pdata == &ehci_platform_defaults && dev->dev.of_node) {
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
+ ehci->big_endian_mmio = 1;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
+ ehci->big_endian_desc = 1;
+
+ if (of_property_read_bool(dev->dev.of_node, "big-endian"))
+ ehci->big_endian_mmio = ehci->big_endian_desc = 1;
+
+ priv->phy = devm_phy_get(&dev->dev, "usb");
+ if (IS_ERR(priv->phy)) {
+ err = PTR_ERR(priv->phy);
+ if (err == -EPROBE_DEFER)
+ goto err_put_hcd;
+ priv->phy = NULL;
+ }
+
+ for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
+ priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+ if (IS_ERR(priv->clks[clk])) {
+ err = PTR_ERR(priv->clks[clk]);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->clks[clk] = NULL;
+ break;
+ }
+ }
+ }
+
+ priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ priv->rst = NULL;
+ } else {
+ err = reset_control_deassert(priv->rst);
+ if (err)
+ goto err_put_clks;
+ }
+
+ if (pdata->big_endian_desc)
+ ehci->big_endian_desc = 1;
+ if (pdata->big_endian_mmio)
+ ehci->big_endian_mmio = 1;
+
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+ if (ehci->big_endian_mmio) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_EHCI_BIG_ENDIAN_MMIO not set\n");
+ err = -EINVAL;
+ goto err_reset;
+ }
+#endif
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
+ if (ehci->big_endian_desc) {
+ dev_err(&dev->dev,
+ "Error: CONFIG_USB_EHCI_BIG_ENDIAN_DESC not set\n");
+ err = -EINVAL;
+ goto err_reset;
+ }
+#endif
+
+ if (pdata->power_on) {
+ err = pdata->power_on(dev);
+ if (err < 0)
+ goto err_reset;
+ }
+
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+
+ hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto err_power;
+ }
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err)
+ goto err_power;
+
+ device_wakeup_enable(hcd->self.controller);
+ platform_set_drvdata(dev, hcd);
+
+ return err;
+
+err_power:
+ if (pdata->power_off)
+ pdata->power_off(dev);
+err_reset:
+ if (priv->rst)
+ reset_control_assert(priv->rst);
+err_put_clks:
+ while (--clk >= 0)
+ clk_put(priv->clks[clk]);
+err_put_hcd:
+ if (pdata == &ehci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ usb_put_hcd(hcd);
+
+ return err;
+}
+
+static int ehci_platform_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+ struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ int clk;
+
+ usb_remove_hcd(hcd);
+
+ if (pdata->power_off)
+ pdata->power_off(dev);
+
+ if (priv->rst)
+ reset_control_assert(priv->rst);
+
+ for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
+ clk_put(priv->clks[clk]);
+
+ usb_put_hcd(hcd);
+
+ if (pdata == &ehci_platform_defaults)
+ dev->dev.platform_data = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int ehci_platform_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ bool do_wakeup = device_may_wakeup(dev);
+ int ret;
+
+ ret = ehci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
+ if (pdata->power_suspend)
+ pdata->power_suspend(pdev);
+
+ return ret;
+}
+
+static int ehci_platform_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+
+ if (pdata->power_on) {
+ int err = pdata->power_on(pdev);
+ if (err < 0)
+ return err;
+ }
+
+ ehci_resume(hcd, false);
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+#define ehci_platform_suspend NULL
+#define ehci_platform_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id vt8500_ehci_ids[] = {
+ { .compatible = "via,vt8500-ehci", },
+ { .compatible = "wm,prizm-ehci", },
+ { .compatible = "generic-ehci", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vt8500_ehci_ids);
+
+static const struct platform_device_id ehci_platform_table[] = {
+ { "ehci-platform", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, ehci_platform_table);
+
+static const struct dev_pm_ops ehci_platform_pm_ops = {
+ .suspend = ehci_platform_suspend,
+ .resume = ehci_platform_resume,
+};
+
+static struct platform_driver ehci_platform_driver = {
+ .id_table = ehci_platform_table,
+ .probe = ehci_platform_probe,
+ .remove = ehci_platform_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ehci-platform",
+ .pm = &ehci_platform_pm_ops,
+ .of_match_table = vt8500_ehci_ids,
+ }
+};
+
+static int __init ehci_platform_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
+ return platform_driver_register(&ehci_platform_driver);
+}
+module_init(ehci_platform_init);
+
+static void __exit ehci_platform_cleanup(void)
+{
+ platform_driver_unregister(&ehci_platform_driver);
+}
+module_exit(ehci_platform_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
new file mode 100644
index 00000000000..7d75465d97c
--- /dev/null
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -0,0 +1,330 @@
+/*
+ * PMC MSP EHCI (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 2006-2010 PMC-Sierra Inc
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+/* includes */
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/usb.h>
+#include <msp_usb.h>
+
+/* stream disable */
+#define USB_CTRL_MODE_STREAM_DISABLE 0x10
+
+/* threshold */
+#define USB_CTRL_FIFO_THRESH 0x00300000
+
+/* register offset for usb_mode */
+#define USB_EHCI_REG_USB_MODE 0x68
+
+/* register offset for usb fifo */
+#define USB_EHCI_REG_USB_FIFO 0x24
+
+/* register offset for usb status */
+#define USB_EHCI_REG_USB_STATUS 0x44
+
+/* serial/parallel transceiver */
+#define USB_EHCI_REG_BIT_STAT_STS (1<<29)
+
+/* TWI USB0 host device pin */
+#define MSP_PIN_USB0_HOST_DEV 49
+
+/* TWI USB1 host device pin */
+#define MSP_PIN_USB1_HOST_DEV 50
+
+
+static void usb_hcd_tdi_set_mode(struct ehci_hcd *ehci)
+{
+ u8 *base;
+ u8 *statreg;
+ u8 *fiforeg;
+ u32 val;
+ struct ehci_regs *reg_base = ehci->regs;
+
+ /* get register base */
+ base = (u8 *)reg_base + USB_EHCI_REG_USB_MODE;
+ statreg = (u8 *)reg_base + USB_EHCI_REG_USB_STATUS;
+ fiforeg = (u8 *)reg_base + USB_EHCI_REG_USB_FIFO;
+
+ /* Disable controller mode stream */
+ val = ehci_readl(ehci, (u32 *)base);
+ ehci_writel(ehci, (val | USB_CTRL_MODE_STREAM_DISABLE),
+ (u32 *)base);
+
+ /* clear STS to select parallel transceiver interface */
+ val = ehci_readl(ehci, (u32 *)statreg);
+ val = val & ~USB_EHCI_REG_BIT_STAT_STS;
+ ehci_writel(ehci, val, (u32 *)statreg);
+
+ /* write to set the proper fifo threshold */
+ ehci_writel(ehci, USB_CTRL_FIFO_THRESH, (u32 *)fiforeg);
+
+ /* set TWI GPIO USB_HOST_DEV pin high */
+ gpio_direction_output(MSP_PIN_USB0_HOST_DEV, 1);
+}
+
+/* called during probe() after chip reset completes */
+static int ehci_msp_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+
+ ehci->caps = hcd->regs;
+ hcd->has_tt = 1;
+
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ usb_hcd_tdi_set_mode(ehci);
+
+ return retval;
+}
+
+
+/* configure so an HC device and id are always provided
+ * always called with process context; sleeping is OK
+ */
+
+static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
+{
+ struct resource *res;
+ struct platform_device *pdev = &dev->dev;
+ u32 res_len;
+ int retval;
+
+ /* MAB register space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res == NULL)
+ return -ENOMEM;
+ res_len = resource_size(res);
+ if (!request_mem_region(res->start, res_len, "mab regs"))
+ return -EBUSY;
+
+ dev->mab_regs = ioremap_nocache(res->start, res_len);
+ if (dev->mab_regs == NULL) {
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ /* MSP USB register space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (res == NULL) {
+ retval = -ENOMEM;
+ goto err2;
+ }
+ res_len = resource_size(res);
+ if (!request_mem_region(res->start, res_len, "usbid regs")) {
+ retval = -EBUSY;
+ goto err2;
+ }
+ dev->usbid_regs = ioremap_nocache(res->start, res_len);
+ if (dev->usbid_regs == NULL) {
+ retval = -ENOMEM;
+ goto err3;
+ }
+
+ return 0;
+err3:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ res_len = resource_size(res);
+ release_mem_region(res->start, res_len);
+err2:
+ iounmap(dev->mab_regs);
+err1:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ res_len = resource_size(res);
+ release_mem_region(res->start, res_len);
+ dev_err(&pdev->dev, "Failed to map non-EHCI regs.\n");
+ return retval;
+}
+
+/**
+ * usb_hcd_msp_probe - initialize PMC MSP-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ *
+ */
+int usb_hcd_msp_probe(const struct hc_driver *driver,
+ struct platform_device *dev)
+{
+ int retval;
+ struct usb_hcd *hcd;
+ struct resource *res;
+	struct ehci_hcd *ehci;
+
+ hcd = usb_create_hcd(driver, &dev->dev, "pmcmsp");
+ if (!hcd)
+ return -ENOMEM;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ pr_debug("No IOMEM resource info for %s.\n", dev->name);
+ retval = -ENOMEM;
+ goto err1;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, dev->name)) {
+ retval = -EBUSY;
+ goto err1;
+ }
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ pr_debug("ioremap failed");
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+ res = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&dev->dev, "No IRQ resource info for %s.\n", dev->name);
+ retval = -ENOMEM;
+ goto err3;
+ }
+
+ /* Map non-EHCI register spaces */
+ retval = usb_hcd_msp_map_regs(to_mspusb_device(dev));
+ if (retval != 0)
+ goto err3;
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+
+
+ retval = usb_add_hcd(hcd, res->start, IRQF_SHARED);
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+ }
+
+ usb_remove_hcd(hcd);
+err3:
+ iounmap(hcd->regs);
+err2:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+
+
+/**
+ * usb_hcd_msp_remove - shutdown processing for PMC MSP-based HCDs
+ * @dev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_msp_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ *
+ * may be called without controller electrically present
+ * may be called with controller, bus, and devices active
+ */
+void usb_hcd_msp_remove(struct usb_hcd *hcd, struct platform_device *dev)
+{
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+}
+
+static const struct hc_driver ehci_msp_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "PMC MSP EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_msp_setup,
+ .shutdown = ehci_shutdown,
+ .start = ehci_run,
+ .stop = ehci_stop,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_msp_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pr_debug("In ehci_hcd_msp_drv_probe");
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ gpio_request(MSP_PIN_USB0_HOST_DEV, "USB0_HOST_DEV_GPIO");
+
+ ret = usb_hcd_msp_probe(&ehci_msp_hc_driver, pdev);
+
+ return ret;
+}
+
+static int ehci_hcd_msp_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_hcd_msp_remove(hcd, pdev);
+
+ /* free TWI GPIO USB_HOST_DEV pin */
+ gpio_free(MSP_PIN_USB0_HOST_DEV);
+
+ return 0;
+}
+
+MODULE_ALIAS("pmcmsp-ehci");
+
+static struct platform_driver ehci_hcd_msp_driver = {
+ .probe = ehci_hcd_msp_drv_probe,
+ .remove = ehci_hcd_msp_drv_remove,
+ .driver = {
+ .name = "pmcmsp-ehci",
+ .owner = THIS_MODULE,
+ },
+};
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index ef732b704f5..547924796d2 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -12,29 +12,14 @@
* This file is licenced under the GPL.
*/
+#include <linux/err.h>
#include <linux/signal.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
-/* called during probe() after chip reset completes */
-static int ehci_ppc_of_setup(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
-
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- retval = ehci_init(hcd);
- if (retval)
- return retval;
-
- ehci->sbrn = 0x20;
- return ehci_reset(ehci);
-}
-
static const struct hc_driver ehci_ppc_of_hc_driver = {
.description = hcd_name,
@@ -45,12 +30,12 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
*/
- .reset = ehci_ppc_of_setup,
+ .reset = ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
@@ -61,6 +46,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
@@ -78,6 +64,8 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
#endif
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
@@ -86,7 +74,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
* Fix: Enable Break Memory Transfer (BMT) in INSNREG3
*/
#define PPC440EPX_EHCI0_INSREG_BMT (0x1 << 0)
-static int __devinit
+static int
ppc44x_enable_bmt(struct device_node *dn)
{
__iomem u32 *insreg_virt;
@@ -102,10 +90,9 @@ ppc44x_enable_bmt(struct device_node *dn)
}
-static int __devinit
-ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
+static int ehci_hcd_ppc_of_probe(struct platform_device *op)
{
- struct device_node *dn = op->node;
+ struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
struct ehci_hcd *ehci = NULL;
struct resource res;
@@ -128,25 +115,19 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
return -ENOMEM;
hcd->rsrc_start = res.start;
- hcd->rsrc_len = res.end - res.start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
- rv = -EBUSY;
- goto err_rmr;
- }
+ hcd->rsrc_len = resource_size(&res);
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
rv = -EBUSY;
goto err_irq;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
- rv = -ENOMEM;
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
goto err_ioremap;
}
@@ -155,12 +136,14 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
if (np != NULL) {
 		/* claim we are really affected by usb23 erratum */
if (!of_address_to_resource(np, 0, &res))
- ehci->ohci_hcctrl_reg = ioremap(res.start +
- OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
+ ehci->ohci_hcctrl_reg =
+ devm_ioremap(&op->dev,
+ res.start + OHCI_HCCTRL_OFFSET,
+ OHCI_HCCTRL_LEN);
else
- pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ pr_debug("%s: no ohci offset in fdt\n", __FILE__);
if (!ehci->ohci_hcctrl_reg) {
- pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n");
+ pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__);
} else {
ehci->has_amcc_usb23 = 1;
}
@@ -176,11 +159,6 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
ehci->big_endian_desc = 1;
ehci->caps = hcd->regs;
- ehci->regs = hcd->regs +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
-
- /* cache this readonly data; minimize chip reads */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
if (of_device_is_compatible(dn, "ibm,usb-ehci-440epx")) {
rv = ppc44x_enable_bmt(dn);
@@ -189,41 +167,34 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
}
rv = usb_add_hcd(hcd, irq, 0);
- if (rv == 0)
- return 0;
+ if (rv)
+ goto err_ioremap;
+
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
- iounmap(hcd->regs);
err_ioremap:
irq_dispose_mapping(irq);
err_irq:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
- if (ehci->has_amcc_usb23)
- iounmap(ehci->ohci_hcctrl_reg);
-err_rmr:
usb_put_hcd(hcd);
return rv;
}
-static int ehci_hcd_ppc_of_remove(struct of_device *op)
+static int ehci_hcd_ppc_of_remove(struct platform_device *op)
{
- struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+ struct usb_hcd *hcd = platform_get_drvdata(op);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct device_node *np;
struct resource res;
- dev_set_drvdata(&op->dev, NULL);
-
dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
usb_remove_hcd(hcd);
- iounmap(hcd->regs);
irq_dispose_mapping(hcd->irq);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
/* use request_mem_region to test if the ohci driver is loaded. if so
* ensure the ohci core is operational.
@@ -238,11 +209,9 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op)
else
release_mem_region(res.start, 0x4);
else
- pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ pr_debug("%s: no ohci offset in fdt\n", __FILE__);
of_node_put(np);
}
-
- iounmap(ehci->ohci_hcctrl_reg);
}
usb_put_hcd(hcd);
@@ -250,18 +219,7 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op)
}
-static int ehci_hcd_ppc_of_shutdown(struct of_device *op)
-{
- struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-
- return 0;
-}
-
-
-static struct of_device_id ehci_hcd_ppc_of_match[] = {
+static const struct of_device_id ehci_hcd_ppc_of_match[] = {
{
.compatible = "usb-ehci",
},
@@ -270,14 +228,13 @@ static struct of_device_id ehci_hcd_ppc_of_match[] = {
MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
-static struct of_platform_driver ehci_hcd_ppc_of_driver = {
- .name = "ppc-of-ehci",
- .match_table = ehci_hcd_ppc_of_match,
+static struct platform_driver ehci_hcd_ppc_of_driver = {
.probe = ehci_hcd_ppc_of_probe,
.remove = ehci_hcd_ppc_of_remove,
- .shutdown = ehci_hcd_ppc_of_shutdown,
- .driver = {
- .name = "ppc-of-ehci",
- .owner = THIS_MODULE,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "ppc-of-ehci",
+ .owner = THIS_MODULE,
+ .of_match_table = ehci_hcd_ppc_of_match,
},
};
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 9c9da35abc6..7934ff9b35e 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -21,33 +21,47 @@
#include <asm/firmware.h>
#include <asm/ps3.h>
-static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
+static void ps3_ehci_setup_insnreg(struct ehci_hcd *ehci)
{
- int result;
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ /* PS3 HC internal setup register offsets. */
- ehci->big_endian_mmio = 1;
+ enum ps3_ehci_hc_insnreg {
+ ps3_ehci_hc_insnreg01 = 0x084,
+ ps3_ehci_hc_insnreg02 = 0x088,
+ ps3_ehci_hc_insnreg03 = 0x08c,
+ };
- ehci->caps = hcd->regs;
- ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci,
- &ehci->caps->hc_capbase));
+ /* PS3 EHCI HC errata fix 316 - The PS3 EHCI HC will reset its
+ * internal INSNREGXX setup regs back to the chip default values
+ * on Host Controller Reset (CMD_RESET) or Light Host Controller
+ * Reset (CMD_LRESET). The work-around for this is for the HC
+	 * driver to re-initialise these regs whenever the HC is reset.
+ */
- dbg_hcs_params(ehci, "reset");
- dbg_hcc_params(ehci, "reset");
+ /* Set burst transfer counts to 256 out, 32 in. */
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ writel_be(0x01000020, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg01);
- result = ehci_halt(ehci);
+ /* Enable burst transfer counts. */
- if (result)
- return result;
+ writel_be(0x00000001, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg03);
+}
- result = ehci_init(hcd);
+static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
+{
+ int result;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ ehci->big_endian_mmio = 1;
+ ehci->caps = hcd->regs;
+
+ result = ehci_setup(hcd);
if (result)
return result;
- ehci_reset(ehci);
+ ps3_ehci_setup_insnreg(ehci);
return result;
}
@@ -57,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.product_desc = "PS3 EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
.reset = ps3_ehci_hc_reset,
.start = ehci_run,
.stop = ehci_stop,
@@ -65,6 +79,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
.get_frame_number = ehci_get_frame,
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
@@ -74,6 +89,8 @@ static const struct hc_driver ps3_ehci_hc_driver = {
#endif
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
@@ -81,7 +98,7 @@ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
int result;
struct usb_hcd *hcd;
unsigned int virq;
- static u64 dummy_mask = DMA_32BIT_MASK;
+ static u64 dummy_mask = DMA_BIT_MASK(32);
if (usb_disabled()) {
result = -ENODEV;
@@ -162,9 +179,9 @@ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
(unsigned long)virq);
- ps3_system_bus_set_driver_data(dev, hcd);
+ ps3_system_bus_set_drvdata(dev, hcd);
- result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
+ result = usb_add_hcd(hcd, virq, 0);
if (result) {
dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
@@ -172,6 +189,7 @@ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
goto fail_add_hcd;
}
+ device_wakeup_enable(hcd->self.controller);
return result;
fail_add_hcd:
@@ -195,8 +213,7 @@ fail_start:
static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
{
unsigned int tmp;
- struct usb_hcd *hcd =
- (struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
+ struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
BUG_ON(!hcd);
@@ -205,10 +222,9 @@ static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
tmp = hcd->irq;
- ehci_shutdown(hcd);
usb_remove_hcd(hcd);
- ps3_system_bus_set_driver_data(dev, NULL);
+ ps3_system_bus_set_drvdata(dev, NULL);
BUG_ON(!hcd->regs);
iounmap(hcd->regs);
@@ -225,7 +241,7 @@ static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
return 0;
}
-static int ps3_ehci_driver_register(struct ps3_system_bus_driver *drv)
+static int __init ps3_ehci_driver_register(struct ps3_system_bus_driver *drv)
{
return firmware_has_feature(FW_FEATURE_PS3_LV1)
? ps3_system_bus_driver_register(drv)
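
The DMA_32BIT_MASK to DMA_BIT_MASK(32) change above is mechanical; both evaluate to the same 32-bit mask. As a reminder, the generic macro is defined roughly as below (per include/linux/dma-mapping.h; shown here only for context):

/* DMA_BIT_MASK(32) evaluates to 0xffffffffULL, the same value the old
 * DMA_32BIT_MASK constant carried.  The n == 64 special case avoids
 * shifting a 64-bit value by 64, which is undefined behaviour in C.
 */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
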
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 3712b925b31..54f5332f814 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -87,31 +87,31 @@ qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
+ struct ehci_qh_hw *hw = qh->hw;
+
/* writes to an active overlay are unsafe */
- BUG_ON(qh->qh_state != QH_STATE_IDLE);
+ WARN_ON(qh->qh_state != QH_STATE_IDLE);
- qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
- qh->hw_alt_next = EHCI_LIST_END(ehci);
+ hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
+ hw->hw_alt_next = EHCI_LIST_END(ehci);
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
- if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
+ if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
unsigned is_out, epnum;
- is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
- epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
- if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
- qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
- usb_settoggle (qh->dev, epnum, is_out, 1);
+ is_out = qh->is_out;
+ epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
+ if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
+ hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+ usb_settoggle(qh->ps.udev, epnum, is_out, 1);
}
}
- /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
- wmb ();
- qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
+ hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
@@ -123,22 +123,72 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *qtd;
- if (list_empty (&qh->qtd_list))
- qtd = qh->dummy;
- else {
- qtd = list_entry (qh->qtd_list.next,
- struct ehci_qtd, qtd_list);
- /* first qtd may already be partially processed */
- if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
- qtd = NULL;
- }
+ qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
- if (qtd)
- qh_update (ehci, qh, qtd);
+ /*
+ * first qtd may already be partially processed.
+ * If we come here during unlink, the QH overlay region
+ * might have a reference to the just-unlinked qtd. The
+ * qtd is updated in qh_completions(). Update the QH
+ * overlay here.
+ */
+ if (qh->hw->hw_token & ACTIVE_BIT(ehci))
+ qh->hw->hw_qtd_next = qtd->hw_next;
+ else
+ qh_update(ehci, qh, qtd);
}
/*-------------------------------------------------------------------------*/
+static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
+
+static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct ehci_qh *qh = ep->hcpriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+ qh->clearing_tt = 0;
+ if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
+ && ehci->rh_state == EHCI_RH_RUNNING)
+ qh_link_async(ehci, qh);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+}
+
+static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
+ struct urb *urb, u32 token)
+{
+
+ /* If an async split transaction gets an error or is unlinked,
+ * the TT buffer may be left in an indeterminate state. We
+ * have to clear the TT buffer.
+ *
+ * Note: this routine is never called for Isochronous transfers.
+ */
+ if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
+#ifdef CONFIG_DYNAMIC_DEBUG
+ struct usb_device *tt = urb->dev->tt->hub;
+ dev_dbg(&tt->dev,
+ "clear tt buffer port %d, a%d ep%d t%08x\n",
+ urb->dev->ttport, urb->dev->devnum,
+ usb_pipeendpoint(urb->pipe), token);
+#endif /* CONFIG_DYNAMIC_DEBUG */
+ if (!ehci_is_TDI(ehci)
+ || urb->dev->tt->hub !=
+ ehci_to_hcd(ehci)->self.root_hub) {
+ if (usb_hub_clear_tt_buffer(urb) == 0)
+ qh->clearing_tt = 1;
+ } else {
+
+ /* REVISIT ARC-derived cores don't clear the root
+ * hub TT buffer in this way...
+ */
+ }
+ }
+}
+
static int qtd_copy_status (
struct ehci_hcd *ehci,
struct urb *urb,
@@ -165,6 +215,14 @@ static int qtd_copy_status (
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
status = -EOVERFLOW;
+ /* CERR nonzero + halt --> stall */
+ } else if (QTD_CERR(token)) {
+ status = -EPIPE;
+
+ /* In theory, more than one of the following bits can be set
+ * since they are sticky and the transaction is retried.
+ * Which to test first is rather arbitrary.
+ */
} else if (token & QTD_STS_MMF) {
/* fs/ls interrupt xfer missed the complete-split */
status = -EPROTO;
@@ -173,49 +231,14 @@ static int qtd_copy_status (
? -ENOSR /* hc couldn't read data */
: -ECOMM; /* hc couldn't write data */
} else if (token & QTD_STS_XACT) {
- /* timeout, bad crc, wrong PID, etc; retried */
- if (QTD_CERR (token))
- status = -EPIPE;
- else {
- ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n",
- urb->dev->devpath,
- usb_pipeendpoint (urb->pipe),
- usb_pipein (urb->pipe) ? "in" : "out");
- status = -EPROTO;
- }
- /* CERR nonzero + no errors + halt --> stall */
- } else if (QTD_CERR (token))
- status = -EPIPE;
- else /* unknown */
+ /* timeout, bad CRC, wrong PID, etc */
+ ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
+ urb->dev->devpath,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+ status = -EPROTO;
+ } else { /* unknown */
status = -EPROTO;
-
- ehci_vdbg (ehci,
- "dev%d ep%d%s qtd token %08x --> status %d\n",
- usb_pipedevice (urb->pipe),
- usb_pipeendpoint (urb->pipe),
- usb_pipein (urb->pipe) ? "in" : "out",
- token, status);
-
- /* if async CSPLIT failed, try cleaning out the TT buffer */
- if (status != -EPIPE
- && urb->dev->tt
- && !usb_pipeint(urb->pipe)
- && ((token & QTD_STS_MMF) != 0
- || QTD_CERR(token) == 0)
- && (!ehci_is_TDI(ehci)
- || urb->dev->tt->hub !=
- ehci_to_hcd(ehci)->self.root_hub)) {
-#ifdef DEBUG
- struct usb_device *tt = urb->dev->tt->hub;
- dev_dbg (&tt->dev,
- "clear tt buffer port %d, a%d ep%d t%08x\n",
- urb->dev->ttport, urb->dev->devnum,
- usb_pipeendpoint (urb->pipe), token);
-#endif /* DEBUG */
- /* REVISIT ARC-derived cores don't clear the root
- * hub TT buffer in this way...
- */
- usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
}
}
@@ -224,19 +247,10 @@ static int qtd_copy_status (
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
-__releases(ehci->lock)
-__acquires(ehci->lock)
{
- if (likely (urb->hcpriv != NULL)) {
- struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
-
- /* S-mask in a QH means it's an interrupt urb */
- if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
-
- /* ... update hc-wide periodic stats (for usbfs) */
- ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
- }
- qh_put (qh);
+ if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+ /* ... update hc-wide periodic stats */
+ ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
}
if (unlikely(urb->unlinked)) {
@@ -258,48 +272,46 @@ __acquires(ehci->lock)
urb->actual_length, urb->transfer_buffer_length);
#endif
- /* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
- spin_unlock (&ehci->lock);
usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
- spin_lock (&ehci->lock);
}
-static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
-static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
-
-static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
* Process and free completed qtds for a qh, returning URBs to drivers.
- * Chases up to qh->hw_current. Returns number of completions called,
- * indicating how much "real" work we did.
+ * Chases up to qh->hw_current. Returns nonzero if the caller should
+ * unlink qh.
*/
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- struct ehci_qtd *last = NULL, *end = qh->dummy;
+ struct ehci_qtd *last, *end = qh->dummy;
struct list_head *entry, *tmp;
- int last_status = -EINPROGRESS;
+ int last_status;
int stopped;
- unsigned count = 0;
u8 state;
- __le32 halt = HALT_BIT(ehci);
-
- if (unlikely (list_empty (&qh->qtd_list)))
- return count;
+ struct ehci_qh_hw *hw = qh->hw;
/* completions (or tasks on other cpus) must never clobber HALT
* till we've gone through and cleaned everything up, even when
* they add urbs to this qh's queue or mark them for unlinking.
*
* NOTE: unlinking expects to be done in queue order.
+ *
+ * It's a bug for qh->qh_state to be anything other than
+ * QH_STATE_IDLE, unless our caller is scan_async() or
+ * scan_intr().
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
+ rescan:
+ last = NULL;
+ last_status = -EINPROGRESS;
+ qh->dequeue_during_giveback = 0;
+
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
* then let the queue advance.
@@ -317,7 +329,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (last) {
if (likely (last->urb != urb)) {
ehci_urb_done(ehci, last->urb, last_status);
- count++;
last_status = -EINPROGRESS;
}
ehci_qtd_free (ehci, last);
@@ -333,12 +344,51 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
token = hc32_to_cpu(ehci, qtd->hw_token);
/* always clean up qtds the hc de-activated */
+ retry_xacterr:
if ((token & QTD_STS_ACTIVE) == 0) {
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ ehci_dbg(ehci,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
/* on STALL, error, and short reads this urb must
* complete and all its qtds must be recycled.
*/
if ((token & QTD_STS_HALT) != 0) {
+
+ /* retry transaction errors until we
+ * reach the software xacterr limit
+ */
+ if ((token & QTD_STS_XACT) &&
+ QTD_CERR(token) == 0 &&
+ ++qh->xacterrs < QH_XACTERR_MAX &&
+ !urb->unlinked) {
+ ehci_dbg(ehci,
+ "detected XactErr len %zu/%zu retry %d\n",
+ qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
+
+ /* reset the token in the qtd and the
+ * qh overlay (which still contains
+ * the qtd) so that we pick up from
+ * where we left off
+ */
+ token &= ~QTD_STS_HALT;
+ token |= QTD_STS_ACTIVE |
+ (EHCI_TUNE_CERR << 10);
+ qtd->hw_token = cpu_to_hc32(ehci,
+ token);
+ wmb();
+ hw->hw_token = cpu_to_hc32(ehci,
+ token);
+ goto retry_xacterr;
+ }
stopped = 1;
/* magic dummy for some short reads; qh won't advance.
@@ -354,12 +404,11 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
&& !(qtd->hw_alt_next
& EHCI_LIST_END(ehci))) {
stopped = 1;
- goto halt;
}
/* stop scanning when we reach qtds the hc is using */
} else if (likely (!stopped
- && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
+ && ehci->rh_state >= EHCI_RH_RUNNING)) {
break;
/* scan the whole queue for unlinks whenever it stops */
@@ -367,7 +416,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
stopped = 1;
/* cancel everything if we halt, suspend, etc */
- if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
+ if (ehci->rh_state < EHCI_RH_RUNNING)
last_status = -ESHUTDOWN;
/* this qtd is active; skip it unless a previous qtd
@@ -376,20 +425,25 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
else if (last_status == -EINPROGRESS && !urb->unlinked)
continue;
- /* qh unlinked; token in overlay may be most current */
- if (state == QH_STATE_IDLE
- && cpu_to_hc32(ehci, qtd->qtd_dma)
- == qh->hw_current)
- token = hc32_to_cpu(ehci, qh->hw_token);
-
- /* force halt for unlinked or blocked qh, so we'll
- * patch the qh later and so that completions can't
- * activate it while we "know" it's stopped.
+ /*
+ * If this was the active qtd when the qh was unlinked
+ * and the overlay's token is active, then the overlay
+ * hasn't been written back to the qtd yet so use its
+ * token instead of the qtd's. After the qtd is
+ * processed and removed, the overlay won't be valid
+ * any more.
*/
- if ((halt & qh->hw_token) == 0) {
-halt:
- qh->hw_token |= halt;
- wmb ();
+ if (state == QH_STATE_IDLE &&
+ qh->qtd_list.next == &qtd->qtd_list &&
+ (hw->hw_token & ACTIVE_BIT(ehci))) {
+ token = hc32_to_cpu(ehci, hw->hw_token);
+ hw->hw_token &= ~ACTIVE_BIT(ehci);
+
+ /* An unlink may leave an incomplete
+ * async transaction in the TT buffer.
+ * We have to clear it.
+ */
+ ehci_clear_tt_buffer(ehci, qh, urb, token);
}
}
@@ -407,6 +461,25 @@ halt:
&& (qtd->hw_alt_next
& EHCI_LIST_END(ehci)))
last_status = -EINPROGRESS;
+
+ /* As part of low/full-speed endpoint-halt processing
+ * we must clear the TT buffer (11.17.5).
+ */
+ if (unlikely(last_status != -EINPROGRESS &&
+ last_status != -EREMOTEIO)) {
+ /* The TT's in some hubs malfunction when they
+ * receive this request following a STALL (they
+ * stop sending isochronous packets). Since a
+ * STALL can't leave the TT buffer in a busy
+ * state (if you believe Figures 11-48 - 11-51
+ * in the USB 2.0 spec), we won't clear the TT
+ * buffer in this case. Strictly speaking this
+ * is a violation of the spec.
+ */
+ if (last_status != -EPIPE)
+ ehci_clear_tt_buffer(ehci, qh, urb,
+ token);
+ }
}
/* if we're removing something not at the queue head,
@@ -421,51 +494,49 @@ halt:
/* remove qtd; it's recycled after possible urb completion */
list_del (&qtd->qtd_list);
last = qtd;
+
+ /* reinit the xacterr counter for the next qtd */
+ qh->xacterrs = 0;
}
/* last urb's completion might still need calling */
if (likely (last != NULL)) {
ehci_urb_done(ehci, last->urb, last_status);
- count++;
ehci_qtd_free (ehci, last);
}
+ /* Do we need to rescan for URBs dequeued during a giveback? */
+ if (unlikely(qh->dequeue_during_giveback)) {
+ /* If the QH is already unlinked, do the rescan now. */
+ if (state == QH_STATE_IDLE)
+ goto rescan;
+
+ /* Otherwise the caller must unlink the QH. */
+ }
+
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
/* be sure the hardware's done with the qh before refreshing
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
+ *
+ * We won't refresh a QH that's linked (after the HC
+ * stopped the queue). That avoids a race:
+ * - HC reads first part of QH;
+ * - CPU updates that first part and the token;
+ * - HC reads rest of that QH, including token
+ * Result: HC gets an inconsistent image, and then
+ * DMAs to/from the wrong memory (corrupting it).
+ *
+ * That should be rare for interrupt transfers,
+ * except maybe high bandwidth ...
*/
- if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
- switch (state) {
- case QH_STATE_IDLE:
- qh_refresh(ehci, qh);
- break;
- case QH_STATE_LINKED:
- /* We won't refresh a QH that's linked (after the HC
- * stopped the queue). That avoids a race:
- * - HC reads first part of QH;
- * - CPU updates that first part and the token;
- * - HC reads rest of that QH, including token
- * Result: HC gets an inconsistent image, and then
- * DMAs to/from the wrong memory (corrupting it).
- *
- * That should be rare for interrupt transfers,
- * except maybe high bandwidth ...
- */
- if ((cpu_to_hc32(ehci, QH_SMASK)
- & qh->hw_info2) != 0) {
- intr_deschedule (ehci, qh);
- (void) qh_schedule (ehci, qh);
- } else
- unlink_async (ehci, qh);
- break;
- /* otherwise, unlink already started */
- }
- }
+ if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
+ qh->exception = 1;
- return count;
+ /* Let the caller know if the QH needs to be unlinked. */
+ return qh->exception;
}
/*-------------------------------------------------------------------------*/
@@ -507,9 +578,11 @@ qh_urb_transaction (
) {
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
- int len, maxpacket;
+ int len, this_sg_len, maxpacket;
int is_input;
u32 token;
+ int i;
+ struct scatterlist *sg;
/*
* URBs map to sequences of QTDs: one logical transaction
@@ -550,7 +623,20 @@ qh_urb_transaction (
/*
* data transfer stage: buffer setup
*/
- buf = urb->transfer_dma;
+ i = urb->num_mapped_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg;
+ buf = sg_dma_address(sg);
+
+ /* urb->transfer_buffer_length may be smaller than the
+ * size of the scatterlist (or vice versa)
+ */
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ } else {
+ sg = NULL;
+ buf = urb->transfer_dma;
+ this_sg_len = len;
+ }
if (is_input)
token |= (1 /* "in" */ << 8);
@@ -566,7 +652,9 @@ qh_urb_transaction (
for (;;) {
int this_qtd_len;
- this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+ this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
+ maxpacket);
+ this_sg_len -= this_qtd_len;
len -= this_qtd_len;
buf += this_qtd_len;
@@ -576,14 +664,19 @@ qh_urb_transaction (
* (this will usually be overridden later.)
*/
if (is_input)
- qtd->hw_alt_next = ehci->async->hw_alt_next;
+ qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
token ^= QTD_TOGGLE;
- if (likely (len <= 0))
- break;
+ if (likely(this_sg_len <= 0)) {
+ if (--i <= 0 || len <= 0)
+ break;
+ sg = sg_next(sg);
+ buf = sg_dma_address(sg);
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ }
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
@@ -605,7 +698,8 @@ qh_urb_transaction (
/*
* control requests may need a terminating data "status" ack;
- * bulk ones may need a terminating short packet (zero length).
+ * other OUT ones may need a terminating short packet
+ * (zero length).
*/
if (likely (urb->transfer_buffer_length != 0)) {
int one_more = 0;
@@ -614,7 +708,7 @@ qh_urb_transaction (
one_more = 1;
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
- } else if (usb_pipebulk (urb->pipe)
+ } else if (usb_pipeout(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
@@ -671,6 +765,7 @@ qh_make (
int is_input, type;
int maxp = 0;
struct usb_tt *tt = urb->dev->tt;
+ struct ehci_qh_hw *hw;
if (!qh)
return qh;
@@ -702,25 +797,35 @@ qh_make (
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
- qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ unsigned tmp;
+
+ qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
- qh->start = NO_FRAME;
+ qh->ps.phase = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
- qh->c_usecs = 0;
+ qh->ps.c_usecs = 0;
qh->gap_uf = 0;
- qh->period = urb->interval >> 3;
- if (qh->period == 0 && urb->interval != 1) {
+ if (urb->interval > 1 && urb->interval < 8) {
/* NOTE interval 2 or 4 uframes could work.
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
- dbg ("intr period %d uframes, NYET!",
- urb->interval);
- goto done;
+ urb->interval = 1;
+ } else if (urb->interval > ehci->periodic_size << 3) {
+ urb->interval = ehci->periodic_size << 3;
}
+ qh->ps.period = urb->interval >> 3;
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
+ 1 << (urb->ep->desc.bInterval - 1));
+
+ /* Allow urb->interval to override */
+ qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+ qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
} else {
int think_time;
@@ -730,28 +835,40 @@ qh_make (
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
- qh->c_usecs = qh->usecs + HS_USECS (0);
- qh->usecs = HS_USECS (1);
+ qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
+ qh->ps.usecs = HS_USECS(1);
} else { // SPLIT+DATA, gap, CSPLIT
- qh->usecs += HS_USECS (1);
- qh->c_usecs = HS_USECS (0);
+ qh->ps.usecs += HS_USECS(1);
+ qh->ps.c_usecs = HS_USECS(0);
}
think_time = tt ? tt->think_time : 0;
- qh->tt_usecs = NS_TO_US (think_time +
+ qh->ps.tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time (urb->dev->speed,
is_input, 0, max_packet (maxp)));
- qh->period = urb->interval;
+ if (urb->interval > ehci->periodic_size)
+ urb->interval = ehci->periodic_size;
+ qh->ps.period = urb->interval;
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
+ urb->ep->desc.bInterval);
+ tmp = rounddown_pow_of_two(tmp);
+
+ /* Allow urb->interval to override */
+ qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+ qh->ps.bw_uperiod = qh->ps.bw_period << 3;
}
}
/* support for tt scheduling, and access to toggles */
- qh->dev = urb->dev;
+ qh->ps.udev = urb->dev;
+ qh->ps.ep = urb->ep;
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
- info1 |= (1 << 12); /* EPS "low" */
+ info1 |= QH_LOW_SPEED;
/* FALL THROUGH */
case USB_SPEED_FULL:
@@ -759,8 +876,8 @@ qh_make (
if (type != PIPE_INTERRUPT)
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type == PIPE_CONTROL) {
- info1 |= (1 << 27); /* for TT */
- info1 |= 1 << 14; /* toggle from qtd */
+ info1 |= QH_CONTROL_EP; /* for TT */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
}
info1 |= maxp << 16;
@@ -785,11 +902,11 @@ qh_make (
break;
case USB_SPEED_HIGH: /* no TT involved */
- info1 |= (2 << 12); /* EPS "high" */
+ info1 |= QH_HIGH_SPEED;
if (type == PIPE_CONTROL) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 64 << 16; /* usb2 fixed maxpacket */
- info1 |= 1 << 14; /* toggle from qtd */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
@@ -807,25 +924,53 @@ qh_make (
}
break;
default:
- dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
+ ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
+ urb->dev->speed);
done:
- qh_put (qh);
+ qh_destroy(ehci, qh);
return NULL;
}
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
- /* init as live, toggle clear, advance to dummy */
+ /* init as live, toggle clear */
qh->qh_state = QH_STATE_IDLE;
- qh->hw_info1 = cpu_to_hc32(ehci, info1);
- qh->hw_info2 = cpu_to_hc32(ehci, info2);
+ hw = qh->hw;
+ hw->hw_info1 = cpu_to_hc32(ehci, info1);
+ hw->hw_info2 = cpu_to_hc32(ehci, info2);
+ qh->is_out = !is_input;
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
- qh_refresh (ehci, qh);
return qh;
}
/*-------------------------------------------------------------------------*/
+static void enable_async(struct ehci_hcd *ehci)
+{
+ if (ehci->async_count++)
+ return;
+
+ /* Stop waiting to turn off the async schedule */
+ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);
+
+ /* Don't start the schedule until ASS is 0 */
+ ehci_poll_ASS(ehci);
+ turn_on_io_watchdog(ehci);
+}
+
+static void disable_async(struct ehci_hcd *ehci)
+{
+ if (--ehci->async_count)
+ return;
+
+ /* The async schedule and unlink lists are supposed to be empty */
+ WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
+ !list_empty(&ehci->async_idle));
+
+ /* Don't turn off the schedule until ASS is 1 */
+ ehci_poll_ASS(ehci);
+}
+
/* move qh (and its qtds) onto async queue; maybe enable queue. */
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
@@ -833,37 +978,30 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
__hc32 dma = QH_NEXT(ehci, qh->qh_dma);
struct ehci_qh *head;
- /* (re)start the async schedule? */
- head = ehci->async;
- timer_action_done (ehci, TIMER_ASYNC_OFF);
- if (!head->qh_next.qh) {
- u32 cmd = ehci_readl(ehci, &ehci->regs->command);
-
- if (!(cmd & CMD_ASE)) {
- /* in case a clear of CMD_ASE didn't take yet */
- (void)handshake(ehci, &ehci->regs->status,
- STS_ASS, 0, 150);
- cmd |= CMD_ASE | CMD_RUN;
- ehci_writel(ehci, cmd, &ehci->regs->command);
- ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
- /* posted write need not be known to HC yet ... */
- }
- }
+ /* Don't link a QH if there's a Clear-TT-Buffer pending */
+ if (unlikely(qh->clearing_tt))
+ return;
+
+ WARN_ON(qh->qh_state != QH_STATE_IDLE);
/* clear halt and/or toggle; and maybe recover from silicon quirk */
- if (qh->qh_state == QH_STATE_IDLE)
- qh_refresh (ehci, qh);
+ qh_refresh(ehci, qh);
/* splice right after start */
+ head = ehci->async;
qh->qh_next = head->qh_next;
- qh->hw_next = head->hw_next;
+ qh->hw->hw_next = head->hw->hw_next;
wmb ();
head->qh_next.qh = qh;
- head->hw_next = dma;
+ head->hw->hw_next = dma;
qh->qh_state = QH_STATE_LINKED;
+ qh->xacterrs = 0;
+ qh->exception = 0;
/* qtd completions reported later by interrupt */
+
+ enable_async(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -905,7 +1043,7 @@ static struct ehci_qh *qh_append_tds (
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice (urb->pipe) == 0)
- qh->hw_info1 &= ~qh_addr_mask;
+ qh->hw->hw_info1 &= ~qh_addr_mask;
}
/* just one way to queue requests: swap with the dummy qtd.
@@ -923,7 +1061,7 @@ static struct ehci_qh *qh_append_tds (
*/
token = qtd->hw_token;
qtd->hw_token = HALT_BIT(ehci);
- wmb ();
+
dummy = qh->dummy;
dma = dummy->qtd_dma;
@@ -947,7 +1085,7 @@ static struct ehci_qh *qh_append_tds (
wmb ();
dummy->hw_token = token;
- urb->hcpriv = qh_get (qh);
+ urb->hcpriv = qh;
}
}
return qh;
@@ -962,27 +1100,28 @@ submit_async (
struct list_head *qtd_list,
gfp_t mem_flags
) {
- struct ehci_qtd *qtd;
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
int rc;
- qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
epnum = urb->ep->desc.bEndpointAddress;
#ifdef EHCI_URB_TRACE
- ehci_dbg (ehci,
- "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
- __func__, urb->dev->devpath, urb,
- epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
- urb->transfer_buffer_length,
- qtd, urb->ep->hcpriv);
+ {
+ struct ehci_qtd *qtd;
+ qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+ ehci_dbg(ehci,
+ "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
+ }
#endif
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
rc = -ESHUTDOWN;
goto done;
}
@@ -1001,7 +1140,7 @@ submit_async (
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely (qh->qh_state == QH_STATE_IDLE))
- qh_link_async (ehci, qh_get (qh));
+ qh_link_async(ehci, qh);
done:
spin_unlock_irqrestore (&ehci->lock, flags);
if (unlikely (qh == NULL))
@@ -1010,156 +1149,328 @@ submit_async (
}
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_HCD_TEST_MODE
+/*
+ * This function creates the qtds and submits them for the
+ * SINGLE_STEP_SET_FEATURE Test.
+ * This is done in two parts: first the SETUP request for GetDesc is sent;
+ * then, 15 seconds later, the IN stage for GetDesc requests data from the dev
+ *
+ * is_setup : input argument that decides which of the two stages needs to be
+ * performed; TRUE - SETUP and FALSE - IN+STATUS
+ * Returns 0 on success
+ */
+static int submit_single_step_set_feature(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ int is_setup
+) {
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct list_head qtd_list;
+ struct list_head *head;
-/* the async qh for the qtds being reclaimed are now unlinked from the HC */
+ struct ehci_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, maxpacket;
+ u32 token;
-static void end_unlink_async (struct ehci_hcd *ehci)
-{
- struct ehci_qh *qh = ehci->reclaim;
- struct ehci_qh *next;
+ INIT_LIST_HEAD(&qtd_list);
+ head = &qtd_list;
- iaa_watchdog_done(ehci);
+ /* URBs map to sequences of QTDs: one logical transaction */
+ qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
+ if (unlikely(!qtd))
+ return -1;
+ list_add_tail(&qtd->qtd_list, head);
+ qtd->urb = urb;
- // qh->hw_next = cpu_to_hc32(qh->qh_dma);
- qh->qh_state = QH_STATE_IDLE;
- qh->qh_next.qh = NULL;
- qh_put (qh); // refcount from reclaim
+ token = QTD_STS_ACTIVE;
+ token |= (EHCI_TUNE_CERR << 10);
- /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
- next = qh->reclaim;
- ehci->reclaim = next;
- qh->reclaim = NULL;
+ len = urb->transfer_buffer_length;
+ /*
+ * Check if the request is to perform just the SETUP stage (getDesc)
+ * as in SINGLE_STEP_SET_FEATURE test, DATA stage (IN) happens
+ * 15 secs after the setup
+ */
+ if (is_setup) {
+ /* SETUP pid */
+ qtd_fill(ehci, qtd, urb->setup_dma,
+ sizeof(struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
- qh_completions (ehci, qh);
+ submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
+ return 0; /*Return now; we shall come back after 15 seconds*/
+ }
- if (!list_empty (&qh->qtd_list)
- && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
- qh_link_async (ehci, qh);
- else {
- qh_put (qh); // refcount from async list
+ /*
+ * IN: data transfer stage: buffer setup: start the IN txn phase for
+ * the get_Desc SETUP which was sent 15 seconds back
+ */
+ token ^= QTD_TOGGLE; /* We need to start IN with a DATA1 PID sequence */
+ buf = urb->transfer_dma;
- /* it's not free to turn the async schedule on/off; leave it
- * active but idle for a while once it empties.
- */
- if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
- && ehci->async->qh_next.qh == NULL)
- timer_action (ehci, TIMER_ASYNC_OFF);
- }
+ token |= (1 /* "in" */ << 8); /*This is IN stage*/
- if (next) {
- ehci->reclaim = NULL;
- start_unlink_async (ehci, next);
- }
+ maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
+
+ qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+
+ /*
+ * Our IN phase shall always be a short read; so keep the queue running
+ * and let it advance to the next qtd, which is the zero-length OUT status
+ */
+ qtd->hw_alt_next = EHCI_LIST_END(ehci);
+
+ /* STATUS stage for GetDesc control request */
+ token ^= 0x0100; /* "in" <--> "out" */
+ token |= QTD_TOGGLE; /* force DATA1 */
+
+ qtd_prev = qtd;
+ qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* don't fill any data in such packets */
+ qtd_fill(ehci, qtd, 0, 0, token, 0);
+
+ /* by default, enable interrupt on urb completion */
+ if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
+ qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
+
+ submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
+
+ return 0;
+
+cleanup:
+ qtd_list_free(ehci, urb, head);
+ return -1;
}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
-/* makes sure the async qh will become idle */
-/* caller must own ehci->lock */
+/*-------------------------------------------------------------------------*/
-static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- int cmd = ehci_readl(ehci, &ehci->regs->command);
- struct ehci_qh *prev;
-
-#ifdef DEBUG
- assert_spin_locked(&ehci->lock);
- if (ehci->reclaim
- || (qh->qh_state != QH_STATE_LINKED
- && qh->qh_state != QH_STATE_UNLINK_WAIT)
- )
- BUG ();
-#endif
-
- /* stop async schedule right now? */
- if (unlikely (qh == ehci->async)) {
- /* can't get here without STS_ASS set */
- if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
- && !ehci->reclaim) {
- /* ... and CMD_IAAD clear */
- ehci_writel(ehci, cmd & ~CMD_ASE,
- &ehci->regs->command);
- wmb ();
- // handshake later, if we need to
- timer_action_done (ehci, TIMER_ASYNC_OFF);
- }
- return;
- }
+ struct ehci_qh *prev;
- qh->qh_state = QH_STATE_UNLINK;
- ehci->reclaim = qh = qh_get (qh);
+ /* Add to the end of the list of QHs waiting for the next IAAD */
+ qh->qh_state = QH_STATE_UNLINK_WAIT;
+ list_add_tail(&qh->unlink_node, &ehci->async_unlink);
+ /* Unlink it from the schedule */
prev = ehci->async;
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
- prev->hw_next = qh->hw_next;
+ prev->hw->hw_next = qh->hw->hw_next;
prev->qh_next = qh->qh_next;
- wmb ();
+ if (ehci->qh_scan_next == qh)
+ ehci->qh_scan_next = qh->qh_next.qh;
+}
- if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
- /* if (unlikely (qh->reclaim != 0))
- * this will recurse, probably not much
- */
- end_unlink_async (ehci);
+static void start_iaa_cycle(struct ehci_hcd *ehci)
+{
+ /* Do nothing if an IAA cycle is already running */
+ if (ehci->iaa_in_progress)
return;
+ ehci->iaa_in_progress = true;
+
+ /* If the controller isn't running, we don't have to wait for it */
+ if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
+ end_unlink_async(ehci);
+
+ /* Otherwise start a new IAA cycle */
+ } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
+
+ /* Make sure the unlinks are all visible to the hardware */
+ wmb();
+
+ ehci_writel(ehci, ehci->command | CMD_IAAD,
+ &ehci->regs->command);
+ ehci_readl(ehci, &ehci->regs->command);
+ ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
}
+}
+
+/* the async qh for the qtds being unlinked are now gone from the HC */
+
+static void end_unlink_async(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+ bool early_exit;
- cmd |= CMD_IAAD;
- ehci_writel(ehci, cmd, &ehci->regs->command);
- (void)ehci_readl(ehci, &ehci->regs->command);
- iaa_watchdog_start(ehci);
+ if (ehci->has_synopsys_hc_bug)
+ ehci_writel(ehci, (u32) ehci->async->qh_dma,
+ &ehci->regs->async_next);
+
+ /* The current IAA cycle has ended */
+ ehci->iaa_in_progress = false;
+
+ if (list_empty(&ehci->async_unlink))
+ return;
+ qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
+ unlink_node); /* QH whose IAA cycle just ended */
+
+ /*
+ * If async_unlinking is set then this routine is already running,
+ * either on the stack or on another CPU.
+ */
+ early_exit = ehci->async_unlinking;
+
+ /* If the controller isn't running, process all the waiting QHs */
+ if (ehci->rh_state < EHCI_RH_RUNNING)
+ list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
+
+ /*
+ * Intel (?) bug: The HC can write back the overlay region even
+ * after the IAA interrupt occurs. In self-defense, always go
+ * through two IAA cycles for each QH.
+ */
+ else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
+ qh->qh_state = QH_STATE_UNLINK;
+ early_exit = true;
+ }
+
+ /* Otherwise process only the first waiting QH (NVIDIA bug?) */
+ else
+ list_move_tail(&qh->unlink_node, &ehci->async_idle);
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (!list_empty(&ehci->async_unlink))
+ start_iaa_cycle(ehci);
+
+ /*
+ * Don't allow nesting or concurrent calls,
+ * or wait for the second IAA cycle for the next QH.
+ */
+ if (early_exit)
+ return;
+
+ /* Process the idle QHs */
+ ehci->async_unlinking = true;
+ while (!list_empty(&ehci->async_idle)) {
+ qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
+ unlink_node);
+ list_del(&qh->unlink_node);
+
+ qh->qh_state = QH_STATE_IDLE;
+ qh->qh_next.qh = NULL;
+
+ if (!list_empty(&qh->qtd_list))
+ qh_completions(ehci, qh);
+ if (!list_empty(&qh->qtd_list) &&
+ ehci->rh_state == EHCI_RH_RUNNING)
+ qh_link_async(ehci, qh);
+ disable_async(ehci);
+ }
+ ehci->async_unlinking = false;
}
-/*-------------------------------------------------------------------------*/
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
-static void scan_async (struct ehci_hcd *ehci)
+static void unlink_empty_async(struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
- enum ehci_timer_action action = TIMER_IO_WATCHDOG;
+ struct ehci_qh *qh_to_unlink = NULL;
+ int count = 0;
+
+ /* Find the last async QH which has been empty for a timer cycle */
+ for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
+ if (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED) {
+ ++count;
+ if (qh->unlink_cycle != ehci->async_unlink_cycle)
+ qh_to_unlink = qh;
+ }
+ }
- ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
- timer_action_done (ehci, TIMER_ASYNC_SHRINK);
-rescan:
- qh = ehci->async->qh_next.qh;
- if (likely (qh != NULL)) {
- do {
- /* clean any finished work for this qh */
- if (!list_empty (&qh->qtd_list)
- && qh->stamp != ehci->stamp) {
- int temp;
-
- /* unlinks could happen here; completion
- * reporting drops the lock. rescan using
- * the latest schedule, but don't rescan
- * qhs we already finished (no looping).
- */
- qh = qh_get (qh);
- qh->stamp = ehci->stamp;
- temp = qh_completions (ehci, qh);
- qh_put (qh);
- if (temp != 0) {
- goto rescan;
- }
- }
+ /* If nothing else is being unlinked, unlink the last empty QH */
+ if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
+ start_unlink_async(ehci, qh_to_unlink);
+ --count;
+ }
+
+ /* Other QHs will be handled later */
+ if (count > 0) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
+ ++ehci->async_unlink_cycle;
+ }
+}
+
+/* The root hub is suspended; unlink all the async QHs */
+static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
- /* unlink idle entries, reducing DMA usage as well
- * as HCD schedule-scanning costs. delay for any qh
- * we just scanned, there's a not-unusual case that it
- * doesn't stay idle for long.
- * (plus, avoids some kind of re-activation race.)
+ while (ehci->async->qh_next.qh) {
+ qh = ehci->async->qh_next.qh;
+ WARN_ON(!list_empty(&qh->qtd_list));
+ single_unlink_async(ehci, qh);
+ }
+ start_iaa_cycle(ehci);
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own ehci->lock */
+
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ /* If the QH isn't linked then there's nothing we can do. */
+ if (qh->qh_state != QH_STATE_LINKED)
+ return;
+
+ single_unlink_async(ehci, qh);
+ start_iaa_cycle(ehci);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_async (struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+ bool check_unlinks_later = false;
+
+ ehci->qh_scan_next = ehci->async->qh_next.qh;
+ while (ehci->qh_scan_next) {
+ qh = ehci->qh_scan_next;
+ ehci->qh_scan_next = qh->qh_next.qh;
+
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why ehci->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then ehci->qh_scan_next is adjusted
+ * in single_unlink_async().
*/
- if (list_empty(&qh->qtd_list)
+ temp = qh_completions(ehci, qh);
+ if (unlikely(temp)) {
+ start_unlink_async(ehci, qh);
+ } else if (list_empty(&qh->qtd_list)
&& qh->qh_state == QH_STATE_LINKED) {
- if (!ehci->reclaim
- && ((ehci->stamp - qh->stamp) & 0x1fff)
- >= (EHCI_SHRINK_FRAMES * 8))
- start_unlink_async(ehci, qh);
- else
- action = TIMER_ASYNC_SHRINK;
+ qh->unlink_cycle = ehci->async_unlink_cycle;
+ check_unlinks_later = true;
}
+ }
+ }
- qh = qh->qh_next.qh;
- } while (qh);
+ /*
+ * Unlink empty entries, reducing DMA usage as well
+ * as HCD schedule-scanning costs. Delay for any qh
+ * we just scanned, there's a not-unusual case that it
+ * doesn't stay idle for long.
+ */
+ if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
+ !(ehci->enabled_hrtimer_events &
+ BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
+ ++ehci->async_unlink_cycle;
}
- if (action == TIMER_ASYNC_SHRINK)
- timer_action (ehci, TIMER_ASYNC_SHRINK);
}
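
The scatter-gather support added to qh_urb_transaction() above follows the usual pattern for consuming a DMA-mapped sg list. A minimal stand-alone sketch of that walk is shown below; the fixed-size "chunk" is a hypothetical stand-in for what qtd_fill() actually consumes, and the function assumes len > 0 and num_mapped_sgs > 0, which the driver checks before taking this path:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Sketch: consume up to 'len' bytes from a DMA-mapped scatterlist,
 * moving to the next entry whenever the current one is drained --
 * the same shape as the new qh_urb_transaction() loop.
 */
static void walk_mapped_sg(struct scatterlist *sg, int num_mapped_sgs, int len)
{
	dma_addr_t buf = sg_dma_address(sg);
	int this_sg_len = min_t(int, sg_dma_len(sg), len);
	int i = num_mapped_sgs;

	for (;;) {
		int chunk = min_t(int, this_sg_len, 512);

		/* ... hand (buf, chunk) to a hardware descriptor here ... */
		this_sg_len -= chunk;
		len -= chunk;
		buf += chunk;

		if (this_sg_len <= 0) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}
	}
}
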
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a081ee65bde..e113fd73aea 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -36,8 +36,6 @@
static int ehci_get_frame (struct usb_hcd *hcd);
-/*-------------------------------------------------------------------------*/
-
/*
* periodic_next_shadow - return "next" pointer on shadow list
* @periodic: host pointer to qh/itd/sitd
@@ -60,6 +58,20 @@ periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
}
}
+static __hc32 *
+shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
+ __hc32 tag)
+{
+ switch (hc32_to_cpu(ehci, tag)) {
+ /* our ehci_shadow.qh is actually software part */
+ case Q_TYPE_QH:
+ return &periodic->qh->hw->hw_next;
+ /* others are hw parts */
+ default:
+ return periodic->hw_next;
+ }
+}
+
/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
@@ -71,7 +83,8 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow(ehci, prev_p,
Q_NEXT_TYPE(ehci, *hw_p));
- hw_p = here.hw_next;
+ hw_p = shadow_next_periodic(ehci, &here,
+ Q_NEXT_TYPE(ehci, *hw_p));
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
@@ -83,81 +96,217 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
*/
*prev_p = *periodic_next_shadow(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
- *hw_p = *here.hw_next;
+
+ if (!ehci->use_dummy_qh ||
+ *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
+ != EHCI_LIST_END(ehci))
+ *hw_p = *shadow_next_periodic(ehci, &here,
+ Q_NEXT_TYPE(ehci, *hw_p));
+ else
+ *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}
-/* how many of the uframe's 125 usecs are allocated? */
-static unsigned short
-periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
+/*-------------------------------------------------------------------------*/
+
+/* Bandwidth and TT management */
+
+/* Find the TT data structure for this device; create it if necessary */
+static struct ehci_tt *find_tt(struct usb_device *udev)
{
- __hc32 *hw_p = &ehci->periodic [frame];
- union ehci_shadow *q = &ehci->pshadow [frame];
- unsigned usecs = 0;
-
- while (q->ptr) {
- switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
- case Q_TYPE_QH:
- /* is it in the S-mask? */
- if (q->qh->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
- usecs += q->qh->usecs;
- /* ... or C-mask? */
- if (q->qh->hw_info2 & cpu_to_hc32(ehci,
- 1 << (8 + uframe)))
- usecs += q->qh->c_usecs;
- hw_p = &q->qh->hw_next;
- q = &q->qh->qh_next;
- break;
- // case Q_TYPE_FSTN:
- default:
- /* for "save place" FSTNs, count the relevant INTR
- * bandwidth from the previous frame
- */
- if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
- ehci_dbg (ehci, "ignoring FSTN cost ...\n");
- }
- hw_p = &q->fstn->hw_next;
- q = &q->fstn->fstn_next;
- break;
- case Q_TYPE_ITD:
- if (q->itd->hw_transaction[uframe])
- usecs += q->itd->stream->usecs;
- hw_p = &q->itd->hw_next;
- q = &q->itd->itd_next;
- break;
- case Q_TYPE_SITD:
- /* is it in the S-mask? (count SPLIT, DATA) */
- if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
- 1 << uframe)) {
- if (q->sitd->hw_fullspeed_ep &
- cpu_to_hc32(ehci, 1<<31))
- usecs += q->sitd->stream->usecs;
- else /* worst case for OUT start-split */
- usecs += HS_USECS_ISO (188);
- }
+ struct usb_tt *utt = udev->tt;
+ struct ehci_tt *tt, **tt_index, **ptt;
+ unsigned port;
+ bool allocated_index = false;
+
+ if (!utt)
+ return NULL; /* Not below a TT */
+
+ /*
+ * Find/create our data structure.
+ * For hubs with a single TT, we get it directly.
+ * For hubs with multiple TTs, there's an extra level of pointers.
+ */
+ tt_index = NULL;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ if (!tt_index) { /* Create the index array */
+ tt_index = kzalloc(utt->hub->maxchild *
+ sizeof(*tt_index), GFP_ATOMIC);
+ if (!tt_index)
+ return ERR_PTR(-ENOMEM);
+ utt->hcpriv = tt_index;
+ allocated_index = true;
+ }
+ port = udev->ttport - 1;
+ ptt = &tt_index[port];
+ } else {
+ port = 0;
+ ptt = (struct ehci_tt **) &utt->hcpriv;
+ }
- /* ... C-mask? (count CSPLIT, DATA) */
- if (q->sitd->hw_uframe &
- cpu_to_hc32(ehci, 1 << (8 + uframe))) {
- /* worst case for IN complete-split */
- usecs += q->sitd->stream->c_usecs;
+ tt = *ptt;
+ if (!tt) { /* Create the ehci_tt */
+ struct ehci_hcd *ehci =
+ hcd_to_ehci(bus_to_hcd(udev->bus));
+
+ tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
+ if (!tt) {
+ if (allocated_index) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
}
+ return ERR_PTR(-ENOMEM);
+ }
+ list_add_tail(&tt->tt_list, &ehci->tt_list);
+ INIT_LIST_HEAD(&tt->ps_list);
+ tt->usb_tt = utt;
+ tt->tt_port = port;
+ *ptt = tt;
+ }
- hw_p = &q->sitd->hw_next;
- q = &q->sitd->sitd_next;
- break;
+ return tt;
+}
+
+/* Release the TT above udev, if it's not in use */
+static void drop_tt(struct usb_device *udev)
+{
+ struct usb_tt *utt = udev->tt;
+ struct ehci_tt *tt, **tt_index, **ptt;
+ int cnt, i;
+
+ if (!utt || !utt->hcpriv)
+ return; /* Not below a TT, or never allocated */
+
+ cnt = 0;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ ptt = &tt_index[udev->ttport - 1];
+
+ /* How many entries are left in tt_index? */
+ for (i = 0; i < utt->hub->maxchild; ++i)
+ cnt += !!tt_index[i];
+ } else {
+ tt_index = NULL;
+ ptt = (struct ehci_tt **) &utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt || !list_empty(&tt->ps_list))
+ return; /* never allocated, or still in use */
+
+ list_del(&tt->tt_list);
+ *ptt = NULL;
+ kfree(tt);
+ if (cnt == 1) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
+ }
+}
+
+static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
+ struct ehci_per_sched *ps)
+{
+ dev_dbg(&ps->udev->dev,
+ "ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
+ ps->ep->desc.bEndpointAddress,
+ (sign >= 0 ? "reserve" : "release"), type,
+ (ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
+ ps->phase, ps->phase_uf, ps->period,
+ ps->usecs, ps->c_usecs, ps->cs_mask);
+}
+
+static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
+ struct ehci_qh *qh, int sign)
+{
+ unsigned start_uf;
+ unsigned i, j, m;
+ int usecs = qh->ps.usecs;
+ int c_usecs = qh->ps.c_usecs;
+ int tt_usecs = qh->ps.tt_usecs;
+ struct ehci_tt *tt;
+
+ if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
+ return;
+ start_uf = qh->ps.bw_phase << 3;
+
+ bandwidth_dbg(ehci, sign, "intr", &qh->ps);
+
+ if (sign < 0) { /* Release bandwidth */
+ usecs = -usecs;
+ c_usecs = -c_usecs;
+ tt_usecs = -tt_usecs;
+ }
+
+ /* Entire transaction (high speed) or start-split (full/low speed) */
+ for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
+ i += qh->ps.bw_uperiod)
+ ehci->bandwidth[i] += usecs;
+
+ /* Complete-split (full/low speed) */
+ if (qh->ps.c_usecs) {
+ /* NOTE: adjustments needed for FSTN */
+ for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
+ i += qh->ps.bw_uperiod) {
+ for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
+ if (qh->ps.cs_mask & m)
+ ehci->bandwidth[i+j] += c_usecs;
+ }
}
}
-#ifdef DEBUG
- if (usecs > 100)
- ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
- frame * 8 + uframe, usecs);
-#endif
- return usecs;
+
+ /* FS/LS bus bandwidth */
+ if (tt_usecs) {
+ tt = find_tt(qh->ps.udev);
+ if (sign > 0)
+ list_add_tail(&qh->ps.ps_list, &tt->ps_list);
+ else
+ list_del(&qh->ps.ps_list);
+
+ for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
+ i += qh->ps.bw_period)
+ tt->bandwidth[i] += tt_usecs;
+ }
}
/*-------------------------------------------------------------------------*/
-static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
+static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
+ struct ehci_tt *tt)
+{
+ struct ehci_per_sched *ps;
+ unsigned uframe, uf, x;
+ u8 *budget_line;
+
+ if (!tt)
+ return;
+ memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);
+
+ /* Add up the contributions from all the endpoints using this TT */
+ list_for_each_entry(ps, &tt->ps_list, ps_list) {
+ for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
+ uframe += ps->bw_uperiod) {
+ budget_line = &budget_table[uframe];
+ x = ps->tt_usecs;
+
+ /* propagate the time forward */
+ for (uf = ps->phase_uf; uf < 8; ++uf) {
+ x += budget_line[uf];
+
+ /* Each microframe lasts 125 us */
+ if (x <= 125) {
+ budget_line[uf] = x;
+ break;
+ } else {
+ budget_line[uf] = 125;
+ x -= 125;
+ }
+ }
+ }
+ }
+}
+
+static int __maybe_unused same_tt(struct usb_device *dev1,
+ struct usb_device *dev2)
{
if (!dev1->tt || !dev2->tt)
return 0;
@@ -205,68 +354,6 @@ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
}
}
-/* How many of the tt's periodic downstream 1000 usecs are allocated?
- *
- * While this measures the bandwidth in terms of usecs/uframe,
- * the low/fullspeed bus has no notion of uframes, so any particular
- * low/fullspeed transfer can "carry over" from one uframe to the next,
- * since the TT just performs downstream transfers in sequence.
- *
- * For example two separate 100 usec transfers can start in the same uframe,
- * and the second one would "carry over" 75 usecs into the next uframe.
- */
-static void
-periodic_tt_usecs (
- struct ehci_hcd *ehci,
- struct usb_device *dev,
- unsigned frame,
- unsigned short tt_usecs[8]
-)
-{
- __hc32 *hw_p = &ehci->periodic [frame];
- union ehci_shadow *q = &ehci->pshadow [frame];
- unsigned char uf;
-
- memset(tt_usecs, 0, 16);
-
- while (q->ptr) {
- switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
- case Q_TYPE_ITD:
- hw_p = &q->itd->hw_next;
- q = &q->itd->itd_next;
- continue;
- case Q_TYPE_QH:
- if (same_tt(dev, q->qh->dev)) {
- uf = tt_start_uframe(ehci, q->qh->hw_info2);
- tt_usecs[uf] += q->qh->tt_usecs;
- }
- hw_p = &q->qh->hw_next;
- q = &q->qh->qh_next;
- continue;
- case Q_TYPE_SITD:
- if (same_tt(dev, q->sitd->urb->dev)) {
- uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
- tt_usecs[uf] += q->sitd->stream->tt_usecs;
- }
- hw_p = &q->sitd->hw_next;
- q = &q->sitd->sitd_next;
- continue;
- // case Q_TYPE_FSTN:
- default:
- ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
- frame);
- hw_p = &q->fstn->hw_next;
- q = &q->fstn->fstn_next;
- }
- }
-
- carryover_tt_bandwidth(tt_usecs);
-
- if (max_tt_usecs[7] < tt_usecs[7])
- ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
- frame, tt_usecs[7] - max_tt_usecs[7]);
-}
-
/*
* Return true if the device's tt's downstream bus is available for a
* periodic transfer of the specified length (usecs), starting at the
@@ -290,32 +377,32 @@ periodic_tt_usecs (
*/
static int tt_available (
struct ehci_hcd *ehci,
- unsigned period,
- struct usb_device *dev,
+ struct ehci_per_sched *ps,
+ struct ehci_tt *tt,
unsigned frame,
- unsigned uframe,
- u16 usecs
+ unsigned uframe
)
{
+ unsigned period = ps->bw_period;
+ unsigned usecs = ps->tt_usecs;
+
if ((period == 0) || (uframe >= 7)) /* error */
return 0;
- for (; frame < ehci->periodic_size; frame += period) {
- unsigned short tt_usecs[8];
+ for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
+ frame += period) {
+ unsigned i, uf;
+ unsigned short tt_usecs[8];
- periodic_tt_usecs (ehci, dev, frame, tt_usecs);
+ if (tt->bandwidth[frame] + usecs > 900)
+ return 0;
- ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
- " schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
- frame, usecs, uframe,
- tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
- tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);
+ uf = frame << 3;
+ for (i = 0; i < 8; (++i, ++uf))
+ tt_usecs[i] = ehci->tt_budget[uf];
- if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
- ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
- frame, uframe);
+ if (max_tt_usecs[uframe] <= tt_usecs[uframe])
return 0;
- }
/* special case for isoc transfers larger than 125us:
* the first and each subsequent fully used uframe
@@ -323,16 +410,11 @@ static int tt_available (
* already scheduled transactions
*/
if (125 < usecs) {
- int ufs = (usecs / 125) - 1;
- int i;
+ int ufs = (usecs / 125);
+
for (i = uframe; i < (uframe + ufs) && i < 8; i++)
- if (0 < tt_usecs[i]) {
- ehci_vdbg(ehci,
- "multi-uframe xfer can't fit "
- "in frame %d uframe %d\n",
- frame, i);
+ if (0 < tt_usecs[i])
return 0;
- }
}
tt_usecs[uframe] += usecs;
@@ -340,12 +422,8 @@ static int tt_available (
carryover_tt_bandwidth(tt_usecs);
/* fail if the carryover pushed bw past the last uframe's limit */
- if (max_tt_usecs[7] < tt_usecs[7]) {
- ehci_vdbg(ehci,
- "tt unavailable usecs %d frame %d uframe %d\n",
- usecs, frame, uframe);
+ if (max_tt_usecs[7] < tt_usecs[7])
return 0;
- }
}
return 1;
@@ -375,6 +453,7 @@ static int tt_no_collision (
for (; frame < ehci->periodic_size; frame += period) {
union ehci_shadow here;
__hc32 type;
+ struct ehci_qh_hw *hw;
here = ehci->pshadow [frame];
type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
@@ -385,17 +464,18 @@ static int tt_no_collision (
here = here.itd->itd_next;
continue;
case Q_TYPE_QH:
- if (same_tt (dev, here.qh->dev)) {
+ hw = here.qh->hw;
+ if (same_tt(dev, here.qh->ps.udev)) {
u32 mask;
mask = hc32_to_cpu(ehci,
- here.qh->hw_info2);
+ hw->hw_info2);
/* "knows" no gap is needed */
mask |= mask >> 8;
if (mask & uf_mask)
break;
}
- type = Q_NEXT_TYPE(ehci, here.qh->hw_next);
+ type = Q_NEXT_TYPE(ehci, hw->hw_next);
here = here.qh->qh_next;
continue;
case Q_TYPE_SITD:
@@ -432,55 +512,26 @@ static int tt_no_collision (
/*-------------------------------------------------------------------------*/
-static int enable_periodic (struct ehci_hcd *ehci)
+static void enable_periodic(struct ehci_hcd *ehci)
{
- u32 cmd;
- int status;
-
- if (ehci->periodic_sched++)
- return 0;
-
- /* did clearing PSE did take effect yet?
- * takes effect only at frame boundaries...
- */
- status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_PSS, 0, 9 * 125);
- if (status)
- return status;
+ if (ehci->periodic_count++)
+ return;
- cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE;
- ehci_writel(ehci, cmd, &ehci->regs->command);
- /* posted write ... PSS happens later */
- ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
+ /* Stop waiting to turn off the periodic schedule */
+ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
- /* make sure ehci_work scans these */
- ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
- % (ehci->periodic_size << 3);
- return 0;
+ /* Don't start the schedule until PSS is 0 */
+ ehci_poll_PSS(ehci);
+ turn_on_io_watchdog(ehci);
}
-static int disable_periodic (struct ehci_hcd *ehci)
+static void disable_periodic(struct ehci_hcd *ehci)
{
- u32 cmd;
- int status;
-
- if (--ehci->periodic_sched)
- return 0;
-
- /* did setting PSE not take effect yet?
- * takes effect only at frame boundaries...
- */
- status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_PSS, STS_PSS, 9 * 125);
- if (status)
- return status;
-
- cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE;
- ehci_writel(ehci, cmd, &ehci->regs->command);
- /* posted write ... */
+ if (--ehci->periodic_count)
+ return;
- ehci->next_uframe = -1;
- return 0;
+ /* Don't turn off the schedule until PSS is 1 */
+ ehci_poll_PSS(ehci);
}
/*-------------------------------------------------------------------------*/
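/*
 * Editorial sketch (not from the kernel sources): after this change the
 * periodic schedule behaves like a reference count -- it is started only on
 * the 0 -> 1 transition of periodic_count and stopped only on the 1 -> 0
 * transition, with the PSE/PSS handshake polled from the hrtimer helpers
 * instead of busy-waiting in these functions.  A minimal model of the
 * pattern, all names hypothetical:
 */
struct periodic_ref {
	int count;
};

static void periodic_get(struct periodic_ref *ref, void (*start)(void))
{
	if (ref->count++ == 0)
		start();	/* first user: begin turning the schedule on */
}

static void periodic_put(struct periodic_ref *ref, void (*stop)(void))
{
	if (--ref->count == 0)
		stop();		/* last user gone: begin turning it off */
}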
@@ -491,21 +542,22 @@ static int disable_periodic (struct ehci_hcd *ehci)
* this just links in a qh; caller guarantees uframe masks are set right.
* no FSTN support (yet; ehci 0.96+)
*/
-static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
- unsigned period = qh->period;
+ unsigned period = qh->ps.period;
- dev_dbg (&qh->dev->dev,
+ dev_dbg(&qh->ps.udev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
- period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
- qh, qh->start, qh->usecs, qh->c_usecs);
+ period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
+ & (QH_CMASK | QH_SMASK),
+ qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
- for (i = qh->start; i < ehci->periodic_size; i += period) {
+ for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
union ehci_shadow *prev = &ehci->pshadow[i];
__hc32 *hw_p = &ehci->periodic[i];
union ehci_shadow here = *prev;
@@ -517,7 +569,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
break;
prev = periodic_next_shadow(ehci, prev, type);
- hw_p = &here.qh->hw_next;
+ hw_p = shadow_next_periodic(ehci, &here, type);
here = *prev;
}
@@ -525,95 +577,188 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
* enables sharing interior tree nodes
*/
while (here.ptr && qh != here.qh) {
- if (qh->period > here.qh->period)
+ if (qh->ps.period > here.qh->ps.period)
break;
prev = &here.qh->qh_next;
- hw_p = &here.qh->hw_next;
+ hw_p = &here.qh->hw->hw_next;
here = *prev;
}
/* link in this qh, unless some earlier pass did that */
if (qh != here.qh) {
qh->qh_next = here;
if (here.qh)
- qh->hw_next = *hw_p;
+ qh->hw->hw_next = *hw_p;
wmb ();
prev->qh = qh;
*hw_p = QH_NEXT (ehci, qh->qh_dma);
}
}
qh->qh_state = QH_STATE_LINKED;
- qh_get (qh);
+ qh->xacterrs = 0;
+ qh->exception = 0;
+
+ /* update per-qh bandwidth for debugfs */
+ ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
+ ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
+ : (qh->ps.usecs * 8);
- /* update per-qh bandwidth for usbfs */
- ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
- ? ((qh->usecs + qh->c_usecs) / qh->period)
- : (qh->usecs * 8);
+ list_add(&qh->intr_node, &ehci->intr_qh_list);
/* maybe enable periodic schedule processing */
- return enable_periodic(ehci);
+ ++ehci->intr_count;
+ enable_periodic(ehci);
}
-static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period;
- // FIXME:
- // IF this isn't high speed
- // and this qh is active in the current uframe
- // (and overlay token SplitXstate is false?)
- // THEN
- // qh->hw_info1 |= __constant_cpu_to_hc32(1 << 7 /* "ignore" */);
+ /*
+ * If qh is for a low/full-speed device, simply unlinking it
+ * could interfere with an ongoing split transaction. To unlink
+ * it safely would require setting the QH_INACTIVATE bit and
+ * waiting at least one frame, as described in EHCI 4.12.2.5.
+ *
+ * We won't bother with any of this. Instead, we assume that the
+ * only reason for unlinking an interrupt QH while the current URB
+ * is still active is to dequeue all the URBs (flush the whole
+ * endpoint queue).
+ *
+ * If rebalancing the periodic schedule is ever implemented, this
+ * approach will no longer be valid.
+ */
/* high bandwidth, or otherwise part of every microframe */
- if ((period = qh->period) == 0)
- period = 1;
+ period = qh->ps.period ? : 1;
- for (i = qh->start; i < ehci->periodic_size; i += period)
+ for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
periodic_unlink (ehci, i, qh);
- /* update per-qh bandwidth for usbfs */
- ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
- ? ((qh->usecs + qh->c_usecs) / qh->period)
- : (qh->usecs * 8);
+ /* update per-qh bandwidth for debugfs */
+ ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
+ ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
+ : (qh->ps.usecs * 8);
- dev_dbg (&qh->dev->dev,
+ dev_dbg(&qh->ps.udev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
- qh->period,
- hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
- qh, qh->start, qh->usecs, qh->c_usecs);
+ qh->ps.period,
+ hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
+ qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
- qh_put (qh);
- /* maybe turn off periodic schedule */
- return disable_periodic(ehci);
+ if (ehci->qh_scan_next == qh)
+ ehci->qh_scan_next = list_entry(qh->intr_node.next,
+ struct ehci_qh, intr_node);
+ list_del(&qh->intr_node);
}
-static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- unsigned wait;
+ if (qh->qh_state != QH_STATE_LINKED ||
+ list_empty(&qh->unlink_node))
+ return;
+
+ list_del_init(&qh->unlink_node);
+
+ /*
+	 * TODO: disable the EHCI_HRTIMER_START_UNLINK_INTR event to avoid an
+	 * unnecessary CPU wakeup
+ */
+}
+
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ /* If the QH isn't linked then there's nothing we can do. */
+ if (qh->qh_state != QH_STATE_LINKED)
+ return;
+
+ /* if the qh is waiting for unlink, cancel it now */
+ cancel_unlink_wait_intr(ehci, qh);
qh_unlink_periodic (ehci, qh);
- /* simple/paranoid: always delay, expecting the HC needs to read
- * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
- * expect khubd to clean up after any CSPLITs we won't issue.
- * active high speed queues may need bigger delays...
+ /* Make sure the unlinks are visible before starting the timer */
+ wmb();
+
+ /*
+ * The EHCI spec doesn't say how long it takes the controller to
+ * stop accessing an unlinked interrupt QH. The timer delay is
+ * 9 uframes; presumably that will be long enough.
*/
- if (list_empty (&qh->qtd_list)
- || (cpu_to_hc32(ehci, QH_CMASK)
- & qh->hw_info2) != 0)
- wait = 2;
- else
- wait = 55; /* worst case: 3 * 1024 */
+ qh->unlink_cycle = ehci->intr_unlink_cycle;
+
+ /* New entries go at the end of the intr_unlink list */
+ list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
+
+ if (ehci->intr_unlinking)
+ ; /* Avoid recursive calls */
+ else if (ehci->rh_state < EHCI_RH_RUNNING)
+ ehci_handle_intr_unlinks(ehci);
+ else if (ehci->intr_unlink.next == &qh->unlink_node) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+ ++ehci->intr_unlink_cycle;
+ }
+}
+
+/*
+ * It is common for only one intr URB to be scheduled on one qh, and
+ * since complete() is run in tasklet context, introduce a short
+ * delay to avoid unlinking the qh too early.
+ */
+static void start_unlink_intr_wait(struct ehci_hcd *ehci,
+ struct ehci_qh *qh)
+{
+ qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
+
+ /* New entries go at the end of the intr_unlink_wait list */
+ list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
+
+ if (ehci->rh_state < EHCI_RH_RUNNING)
+ ehci_handle_start_intr_unlinks(ehci);
+ else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
+ ++ehci->intr_unlink_wait_cycle;
+ }
+}
+
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ struct ehci_qh_hw *hw = qh->hw;
+ int rc;
- udelay (wait);
qh->qh_state = QH_STATE_IDLE;
- qh->hw_next = EHCI_LIST_END(ehci);
- wmb ();
+ hw->hw_next = EHCI_LIST_END(ehci);
+
+ if (!list_empty(&qh->qtd_list))
+ qh_completions(ehci, qh);
+
+ /* reschedule QH iff another request is queued */
+ if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
+ rc = qh_schedule(ehci, qh);
+ if (rc == 0) {
+ qh_refresh(ehci, qh);
+ qh_link_periodic(ehci, qh);
+ }
+
+ /* An error here likely indicates handshake failure
+ * or no space left in the schedule. Neither fault
+ * should happen often ...
+ *
+ * FIXME kill the now-dysfunctional queued urbs
+ */
+ else {
+ ehci_err(ehci, "can't reschedule qh %p, err %d\n",
+ qh, rc);
+ }
+ }
+
+ /* maybe turn off periodic schedule */
+ --ehci->intr_count;
+ disable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -622,42 +767,22 @@ static int check_period (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
- unsigned period,
+ unsigned uperiod,
unsigned usecs
) {
- int claimed;
-
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
if (uframe >= 8)
return 0;
- /*
- * 80% periodic == 100 usec/uframe available
- * convert "usecs we need" to "max already claimed"
- */
- usecs = 100 - usecs;
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = ehci->uframe_periodic_max - usecs;
- /* we "know" 2 and 4 uframe intervals were rejected; so
- * for period 0, check _every_ microframe in the schedule.
- */
- if (unlikely (period == 0)) {
- do {
- for (uframe = 0; uframe < 7; uframe++) {
- claimed = periodic_usecs (ehci, frame, uframe);
- if (claimed > usecs)
- return 0;
- }
- } while ((frame += 1) < ehci->periodic_size);
-
- /* just check the specified uframe, at that period */
- } else {
- do {
- claimed = periodic_usecs (ehci, frame, uframe);
- if (claimed > usecs)
- return 0;
- } while ((frame += period) < ehci->periodic_size);
+ for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
+ uframe += uperiod) {
+ if (ehci->bandwidth[uframe] > usecs)
+ return 0;
}
// success!
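/*
 * Illustrative sketch (editorial, not from the kernel sources): with this
 * change the per-microframe load lives in a flat table indexed by absolute
 * microframe (frame * 8 + uframe), and a reservation with microframe
 * period "uperiod" must fit in every uperiod-th slot.  "limit" plays the
 * role of uframe_periodic_max (100 us at the default 80% reservation); the
 * table size stands in for EHCI_BANDWIDTH_SIZE.
 */
static int uframe_slots_fit(const unsigned char bw[], unsigned table_size,
			    unsigned frame, unsigned uframe,
			    unsigned uperiod, unsigned usecs, unsigned limit)
{
	unsigned i;

	for (i = (frame << 3) + uframe; i < table_size; i += uperiod)
		if (bw[i] + usecs > limit)	/* same test as above, rearranged */
			return 0;
	return 1;
}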
@@ -668,40 +793,40 @@ static int check_intr_schedule (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
- const struct ehci_qh *qh,
- __hc32 *c_maskp
+ struct ehci_qh *qh,
+ unsigned *c_maskp,
+ struct ehci_tt *tt
)
{
int retval = -ENOSPC;
u8 mask = 0;
- if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
- if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
+ if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
goto done;
- if (!qh->c_usecs) {
+ if (!qh->ps.c_usecs) {
retval = 0;
*c_maskp = 0;
goto done;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
- if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
- qh->tt_usecs)) {
+ if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
unsigned i;
/* TODO : this may need FSTN for SSPLIT in uframe 5. */
- for (i=uframe+1; i<8 && i<uframe+4; i++)
- if (!check_period (ehci, frame, i,
- qh->period, qh->c_usecs))
+ for (i = uframe+2; i < 8 && i <= uframe+4; i++)
+ if (!check_period(ehci, frame, i,
+ qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
else
mask |= 1 << i;
retval = 0;
- *c_maskp = cpu_to_hc32(ehci, mask << 8);
+ *c_maskp = mask;
}
#else
/* Make sure this tt's buffer is also available for CSPLITs.
@@ -712,15 +837,15 @@ static int check_intr_schedule (
* one smart pass...
*/
mask = 0x03 << (uframe + qh->gap_uf);
- *c_maskp = cpu_to_hc32(ehci, mask << 8);
+ *c_maskp = mask;
mask |= 1 << uframe;
- if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
- if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
- qh->period, qh->c_usecs))
+ if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
+ if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
+ qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
- if (!check_period (ehci, frame, uframe + qh->gap_uf,
- qh->period, qh->c_usecs))
+ if (!check_period(ehci, frame, uframe + qh->gap_uf,
+ qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
retval = 0;
}
@@ -734,63 +859,68 @@ done:
*/
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- int status;
+ int status = 0;
unsigned uframe;
- __hc32 c_mask;
- unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ unsigned c_mask;
+ struct ehci_qh_hw *hw = qh->hw;
+ struct ehci_tt *tt;
- qh_refresh(ehci, qh);
- qh->hw_next = EHCI_LIST_END(ehci);
- frame = qh->start;
+ hw->hw_next = EHCI_LIST_END(ehci);
/* reuse the previous schedule slots, if we can */
- if (frame < qh->period) {
- uframe = ffs(hc32_to_cpup(ehci, &qh->hw_info2) & QH_SMASK);
- status = check_intr_schedule (ehci, frame, --uframe,
- qh, &c_mask);
- } else {
- uframe = 0;
- c_mask = 0;
- status = -ENOSPC;
+ if (qh->ps.phase != NO_FRAME) {
+ ehci_dbg(ehci, "reused qh %p schedule\n", qh);
+ return 0;
+ }
+
+ uframe = 0;
+ c_mask = 0;
+ tt = find_tt(qh->ps.udev);
+ if (IS_ERR(tt)) {
+ status = PTR_ERR(tt);
+ goto done;
}
+ compute_tt_budget(ehci->tt_budget, tt);
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
- if (status) {
- /* "normal" case, uframing flexible except with splits */
- if (qh->period) {
- frame = qh->period - 1;
- do {
- for (uframe = 0; uframe < 8; uframe++) {
- status = check_intr_schedule (ehci,
- frame, uframe, qh,
- &c_mask);
- if (status == 0)
- break;
- }
- } while (status && frame--);
-
- /* qh->period == 0 means every uframe */
- } else {
- frame = 0;
- status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->ps.bw_period) {
+ int i;
+ unsigned frame;
+
+ for (i = qh->ps.bw_period; i > 0; --i) {
+ frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule(ehci,
+ frame, uframe, qh, &c_mask, tt);
+ if (status == 0)
+ goto got_it;
+ }
}
- if (status)
- goto done;
- qh->start = frame;
- /* reset S-frame and (maybe) C-frame masks */
- qh->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
- qh->hw_info2 |= qh->period
- ? cpu_to_hc32(ehci, 1 << uframe)
- : cpu_to_hc32(ehci, QH_SMASK);
- qh->hw_info2 |= c_mask;
- } else
- ehci_dbg (ehci, "reused qh %p schedule\n", qh);
+ /* qh->ps.bw_period == 0 means every uframe */
+ } else {
+ status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
+ }
+ if (status)
+ goto done;
+
+ got_it:
+ qh->ps.phase = (qh->ps.period ? ehci->random_frame &
+ (qh->ps.period - 1) : 0);
+ qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
+ qh->ps.phase_uf = uframe;
+ qh->ps.cs_mask = qh->ps.period ?
+ (c_mask << 8) | (1 << uframe) :
+ QH_SMASK;
+
+ /* reset S-frame and (maybe) C-frame masks */
+ hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
+ hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
+ reserve_release_intr_bandwidth(ehci, qh, 1);
- /* stuff into the periodic schedule */
- status = qh_link_periodic (ehci, qh);
done:
return status;
}
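/*
 * Worked example (editorial): for a full/low-speed interrupt QH that is
 * placed at uframe 1 with its CSPLITs accepted in uframes 3-5,
 * check_intr_schedule() returns c_mask = 0x38, so
 *
 *	qh->ps.cs_mask = (0x38 << 8) | (1 << 1) = 0x3802
 *
 * i.e. an S-mask of 0x02 (SSPLIT in uframe 1) and a C-mask of 0x38
 * (CSPLITs in uframes 3-5), which is the bit layout merged into hw_info2
 * above.  A high-speed QH with ps.period == 0 instead gets QH_SMASK, i.e.
 * a transaction opportunity in every microframe.
 */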
@@ -812,8 +942,7 @@ static int intr_submit (
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
@@ -837,6 +966,15 @@ static int intr_submit (
qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
BUG_ON (qh == NULL);
+ /* stuff into the periodic schedule */
+ if (qh->qh_state == QH_STATE_IDLE) {
+ qh_refresh(ehci, qh);
+ qh_link_periodic(ehci, qh);
+ } else {
+ /* cancel unlink wait for the qh */
+ cancel_unlink_wait_intr(ehci, qh);
+ }
+
/* ... update usbfs periodic stats */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
@@ -851,6 +989,34 @@ done_not_linked:
return status;
}
+static void scan_intr(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+
+ list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
+ intr_node) {
+
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why ehci->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then ehci->qh_scan_next is adjusted
+ * in qh_unlink_periodic().
+ */
+ temp = qh_completions(ehci, qh);
+ if (unlikely(temp))
+ start_unlink_intr(ehci, qh);
+ else if (unlikely(list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED))
+ start_unlink_intr_wait(ehci, qh);
+ }
+ }
+}
+
/*-------------------------------------------------------------------------*/
/* ehci_iso_stream ops work with both ITD and SITD */
@@ -864,8 +1030,8 @@ iso_stream_alloc (gfp_t mem_flags)
if (likely (stream != NULL)) {
INIT_LIST_HEAD(&stream->td_list);
INIT_LIST_HEAD(&stream->free_list);
- stream->next_uframe = -1;
- stream->refcount = 1;
+ stream->next_uframe = NO_FRAME;
+ stream->ps.phase = NO_FRAME;
}
return stream;
}
@@ -874,25 +1040,24 @@ static void
iso_stream_init (
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
- struct usb_device *dev,
- int pipe,
- unsigned interval
+ struct urb *urb
)
{
static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
+ struct usb_device *dev = urb->dev;
u32 buf1;
unsigned epnum, maxp;
int is_input;
- long bandwidth;
+ unsigned tmp;
/*
* this might be a "high bandwidth" highspeed endpoint,
* as encoded in the ep descriptor's wMaxPacket field
*/
- epnum = usb_pipeendpoint (pipe);
- is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
- maxp = usb_maxpacket(dev, pipe, !is_input);
+ epnum = usb_pipeendpoint(urb->pipe);
+ is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
+ maxp = usb_endpoint_maxp(&urb->ep->desc);
if (is_input) {
buf1 = (1 << 11);
} else {
@@ -916,9 +1081,19 @@ iso_stream_init (
/* usbfs wants to report the average usecs per frame tied up
* when transfers on this endpoint are scheduled ...
*/
- stream->usecs = HS_USECS_ISO (maxp);
- bandwidth = stream->usecs * 8;
- bandwidth /= interval;
+ stream->ps.usecs = HS_USECS_ISO(maxp);
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
+ 1 << (urb->ep->desc.bInterval - 1));
+
+ /* Allow urb->interval to override */
+ stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+
+ stream->uperiod = urb->interval;
+ stream->ps.period = urb->interval >> 3;
+ stream->bandwidth = stream->ps.usecs * 8 /
+ stream->ps.bw_uperiod;
} else {
u32 addr;
@@ -932,101 +1107,49 @@ iso_stream_init (
addr |= dev->tt->hub->devnum << 16;
addr |= epnum << 8;
addr |= dev->devnum;
- stream->usecs = HS_USECS_ISO (maxp);
+ stream->ps.usecs = HS_USECS_ISO(maxp);
think_time = dev->tt ? dev->tt->think_time : 0;
- stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
+ stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
dev->speed, is_input, 1, maxp));
hs_transfers = max (1u, (maxp + 187) / 188);
if (is_input) {
u32 tmp;
addr |= 1 << 31;
- stream->c_usecs = stream->usecs;
- stream->usecs = HS_USECS_ISO (1);
- stream->raw_mask = 1;
+ stream->ps.c_usecs = stream->ps.usecs;
+ stream->ps.usecs = HS_USECS_ISO(1);
+ stream->ps.cs_mask = 1;
/* c-mask as specified in USB 2.0 11.18.4 3.c */
tmp = (1 << (hs_transfers + 2)) - 1;
- stream->raw_mask |= tmp << (8 + 2);
+ stream->ps.cs_mask |= tmp << (8 + 2);
} else
- stream->raw_mask = smask_out [hs_transfers - 1];
- bandwidth = stream->usecs + stream->c_usecs;
- bandwidth /= interval << 3;
+ stream->ps.cs_mask = smask_out[hs_transfers - 1];
+
+ /* period for bandwidth allocation */
+ tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
+ 1 << (urb->ep->desc.bInterval - 1));
- /* stream->splits gets created from raw_mask later */
+ /* Allow urb->interval to override */
+ stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+ stream->ps.bw_uperiod = stream->ps.bw_period << 3;
+
+ stream->ps.period = urb->interval;
+ stream->uperiod = urb->interval << 3;
+ stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
+ stream->ps.bw_period;
+
+ /* stream->splits gets created from cs_mask later */
stream->address = cpu_to_hc32(ehci, addr);
}
- stream->bandwidth = bandwidth;
- stream->udev = dev;
+ stream->ps.udev = dev;
+ stream->ps.ep = urb->ep;
stream->bEndpointAddress = is_input | epnum;
- stream->interval = interval;
stream->maxp = maxp;
}
-static void
-iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
-{
- stream->refcount--;
-
- /* free whenever just a dev->ep reference remains.
- * not like a QH -- no persistent state (toggle, halt)
- */
- if (stream->refcount == 1) {
- int is_in;
-
- // BUG_ON (!list_empty(&stream->td_list));
-
- while (!list_empty (&stream->free_list)) {
- struct list_head *entry;
-
- entry = stream->free_list.next;
- list_del (entry);
-
- /* knows about ITD vs SITD */
- if (stream->highspeed) {
- struct ehci_itd *itd;
-
- itd = list_entry (entry, struct ehci_itd,
- itd_list);
- dma_pool_free (ehci->itd_pool, itd,
- itd->itd_dma);
- } else {
- struct ehci_sitd *sitd;
-
- sitd = list_entry (entry, struct ehci_sitd,
- sitd_list);
- dma_pool_free (ehci->sitd_pool, sitd,
- sitd->sitd_dma);
- }
- }
-
- is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
- stream->bEndpointAddress &= 0x0f;
- stream->ep->hcpriv = NULL;
-
- if (stream->rescheduled) {
- ehci_info (ehci, "ep%d%s-iso rescheduled "
- "%lu times in %lu seconds\n",
- stream->bEndpointAddress, is_in ? "in" : "out",
- stream->rescheduled,
- ((jiffies - stream->start)/HZ)
- );
- }
-
- kfree(stream);
- }
-}
-
-static inline struct ehci_iso_stream *
-iso_stream_get (struct ehci_iso_stream *stream)
-{
- if (likely (stream != NULL))
- stream->refcount++;
- return stream;
-}
-
static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
@@ -1047,24 +1170,18 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
if (unlikely (stream == NULL)) {
stream = iso_stream_alloc(GFP_ATOMIC);
if (likely (stream != NULL)) {
- /* dev->ep owns the initial refcount */
ep->hcpriv = stream;
- stream->ep = ep;
- iso_stream_init(ehci, stream, urb->dev, urb->pipe,
- urb->interval);
+ iso_stream_init(ehci, stream, urb);
}
- /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
- } else if (unlikely (stream->hw_info1 != 0)) {
+ /* if dev->ep [epnum] is a QH, hw is set */
+ } else if (unlikely (stream->hw != NULL)) {
ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
urb->dev->devpath, epnum,
usb_pipein(urb->pipe) ? "in" : "out");
stream = NULL;
}
- /* caller guarantees an eventual matching iso_stream_put */
- stream = iso_stream_get (stream);
-
spin_unlock_irqrestore (&ehci->lock, flags);
return stream;
}
@@ -1099,7 +1216,7 @@ itd_sched_init(
dma_addr_t dma = urb->transfer_dma;
/* how many uframes are needed for these transfers */
- iso_sched->span = urb->number_of_packets * stream->interval;
+ iso_sched->span = urb->number_of_packets * stream->uperiod;
/* figure out per-uframe itd fields that we'll need later
* when we fit new itds into the schedule.
@@ -1172,17 +1289,19 @@ itd_urb_transaction (
spin_lock_irqsave (&ehci->lock, flags);
for (i = 0; i < num_itds; i++) {
- /* free_list.next might be cache-hot ... but maybe
- * the HC caches it too. avoid that issue for now.
+ /*
+ * Use iTDs from the free list, but not iTDs that may
+ * still be in use by the hardware.
*/
-
- /* prefer previously-allocated itds */
- if (likely (!list_empty(&stream->free_list))) {
- itd = list_entry (stream->free_list.prev,
+ if (likely(!list_empty(&stream->free_list))) {
+ itd = list_first_entry(&stream->free_list,
struct ehci_itd, itd_list);
+ if (itd->frame == ehci->now_frame)
+ goto alloc_itd;
list_del (&itd->itd_list);
itd_dma = itd->itd_dma;
} else {
+ alloc_itd:
spin_unlock_irqrestore (&ehci->lock, flags);
itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
&itd_dma);
@@ -1196,6 +1315,7 @@ itd_urb_transaction (
memset (itd, 0, sizeof *itd);
itd->itd_dma = itd_dma;
+ itd->frame = NO_FRAME;
list_add (&itd->itd_list, &sched->td_list);
}
spin_unlock_irqrestore (&ehci->lock, flags);
@@ -1208,102 +1328,155 @@ itd_urb_transaction (
/*-------------------------------------------------------------------------*/
+static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
+ struct ehci_iso_stream *stream, int sign)
+{
+ unsigned uframe;
+ unsigned i, j;
+ unsigned s_mask, c_mask, m;
+ int usecs = stream->ps.usecs;
+ int c_usecs = stream->ps.c_usecs;
+ int tt_usecs = stream->ps.tt_usecs;
+ struct ehci_tt *tt;
+
+ if (stream->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
+ return;
+ uframe = stream->ps.bw_phase << 3;
+
+ bandwidth_dbg(ehci, sign, "iso", &stream->ps);
+
+ if (sign < 0) { /* Release bandwidth */
+ usecs = -usecs;
+ c_usecs = -c_usecs;
+ tt_usecs = -tt_usecs;
+ }
+
+ if (!stream->splits) { /* High speed */
+ for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
+ i += stream->ps.bw_uperiod)
+ ehci->bandwidth[i] += usecs;
+
+ } else { /* Full speed */
+ s_mask = stream->ps.cs_mask;
+ c_mask = s_mask >> 8;
+
+ /* NOTE: adjustment needed for frame overflow */
+ for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
+ i += stream->ps.bw_uperiod) {
+ for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
+ (++j, m <<= 1)) {
+ if (s_mask & m)
+ ehci->bandwidth[i+j] += usecs;
+ else if (c_mask & m)
+ ehci->bandwidth[i+j] += c_usecs;
+ }
+ }
+
+ tt = find_tt(stream->ps.udev);
+ if (sign > 0)
+ list_add_tail(&stream->ps.ps_list, &tt->ps_list);
+ else
+ list_del(&stream->ps.ps_list);
+
+ for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
+ i += stream->ps.bw_period)
+ tt->bandwidth[i] += tt_usecs;
+ }
+}
+
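/*
 * Editorial note: reserve_release_iso_bandwidth() is called with sign = +1
 * to reserve and sign = -1 to release; releasing simply negates the
 * per-microframe (and, for full speed, per-frame TT) microsecond counts
 * before adding them back into the same tables, so the two calls are exact
 * inverses.  High-speed streams touch only ehci->bandwidth[]; full-speed
 * streams also charge tt->bandwidth[] and stay on the TT's ps_list while
 * the reservation is held.
 */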
static inline int
itd_slot_ok (
struct ehci_hcd *ehci,
- u32 mod,
- u32 uframe,
- u8 usecs,
- u32 period
+ struct ehci_iso_stream *stream,
+ unsigned uframe
)
{
- uframe %= period;
- do {
- /* can't commit more than 80% periodic == 100 usec */
- if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
- > (100 - usecs))
- return 0;
+ unsigned usecs;
+
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = ehci->uframe_periodic_max - stream->ps.usecs;
- /* we know urb->interval is 2^N uframes */
- uframe += period;
- } while (uframe < mod);
+ for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
+ uframe += stream->ps.bw_uperiod) {
+ if (ehci->bandwidth[uframe] > usecs)
+ return 0;
+ }
return 1;
}
static inline int
sitd_slot_ok (
struct ehci_hcd *ehci,
- u32 mod,
struct ehci_iso_stream *stream,
- u32 uframe,
+ unsigned uframe,
struct ehci_iso_sched *sched,
- u32 period_uframes
+ struct ehci_tt *tt
)
{
- u32 mask, tmp;
- u32 frame, uf;
+ unsigned mask, tmp;
+ unsigned frame, uf;
+
+ mask = stream->ps.cs_mask << (uframe & 7);
- mask = stream->raw_mask << (uframe & 7);
+ /* for OUT, don't wrap SSPLIT into H-microframe 7 */
+ if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
+ return 0;
/* for IN, don't wrap CSPLIT into the next frame */
if (mask & ~0xffff)
return 0;
- /* this multi-pass logic is simple, but performance may
- * suffer when the schedule data isn't cached.
- */
-
/* check bandwidth */
- uframe %= period_uframes;
- do {
- u32 max_used;
-
- frame = uframe >> 3;
- uf = uframe & 7;
+ uframe &= stream->ps.bw_uperiod - 1;
+ frame = uframe >> 3;
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
- /* The tt's fullspeed bus bandwidth must be available.
- * tt_available scheduling guarantees 10+% for control/bulk.
- */
- if (!tt_available (ehci, period_uframes << 3,
- stream->udev, frame, uf, stream->tt_usecs))
- return 0;
+ /* The tt's fullspeed bus bandwidth must be available.
+ * tt_available scheduling guarantees 10+% for control/bulk.
+ */
+ uf = uframe & 7;
+ if (!tt_available(ehci, &stream->ps, tt, frame, uf))
+ return 0;
#else
- /* tt must be idle for start(s), any gap, and csplit.
- * assume scheduling slop leaves 10+% for control/bulk.
- */
- if (!tt_no_collision (ehci, period_uframes << 3,
- stream->udev, frame, mask))
- return 0;
+ /* tt must be idle for start(s), any gap, and csplit.
+ * assume scheduling slop leaves 10+% for control/bulk.
+ */
+ if (!tt_no_collision(ehci, stream->ps.bw_period,
+ stream->ps.udev, frame, mask))
+ return 0;
#endif
+ do {
+ unsigned max_used;
+ unsigned i;
+
/* check starts (OUT uses more than one) */
- max_used = 100 - stream->usecs;
- for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
- if (periodic_usecs (ehci, frame, uf) > max_used)
+ uf = uframe;
+ max_used = ehci->uframe_periodic_max - stream->ps.usecs;
+ for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
+ if (ehci->bandwidth[uf] > max_used)
return 0;
}
/* for IN, check CSPLIT */
- if (stream->c_usecs) {
- uf = uframe & 7;
- max_used = 100 - stream->c_usecs;
- do {
- tmp = 1 << uf;
- tmp <<= 8;
- if ((stream->raw_mask & tmp) == 0)
+ if (stream->ps.c_usecs) {
+ max_used = ehci->uframe_periodic_max -
+ stream->ps.c_usecs;
+ uf = uframe & ~7;
+ tmp = 1 << (2+8);
+ for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
+ if ((stream->ps.cs_mask & tmp) == 0)
continue;
- if (periodic_usecs (ehci, frame, uf)
- > max_used)
+ if (ehci->bandwidth[uf+i] > max_used)
return 0;
- } while (++uf < 8);
+ }
}
- /* we know urb->interval is 2^N uframes */
- uframe += period_uframes;
- } while (uframe < mod);
+ uframe += stream->ps.bw_uperiod;
+ } while (uframe < EHCI_BANDWIDTH_SIZE);
- stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
+ stream->ps.cs_mask <<= uframe & 7;
+ stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
return 1;
}
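/*
 * Illustrative sketch (editorial, not from the kernel sources): the two
 * wrap tests at the top of sitd_slot_ok(), as plain C.  The low byte of
 * cs_mask holds the SSPLIT microframes and the next byte the CSPLIT
 * microframes; after shifting to the candidate start microframe, an OUT
 * SSPLIT must not reach H-microframe 7 and an IN CSPLIT must not spill
 * past bit 15 into the next frame.
 */
static int split_masks_ok(unsigned cs_mask, unsigned uframe)
{
	unsigned mask = cs_mask << (uframe & 7);

	if (((cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
		return 0;	/* SSPLIT would reach uframe 7 or wrap */
	if (mask & ~0xffff)
		return 0;	/* CSPLIT would wrap into the next frame */
	return 1;
}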
@@ -1318,8 +1491,6 @@ sitd_slot_ok (
* given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
*/
-#define SCHEDULE_SLOP 10 /* frames */
-
static int
iso_stream_schedule (
struct ehci_hcd *ehci,
@@ -1327,107 +1498,189 @@ iso_stream_schedule (
struct ehci_iso_stream *stream
)
{
- u32 now, start, max, period;
- int status;
+ u32 now, base, next, start, period, span, now2;
+ u32 wrap = 0, skip = 0;
+ int status = 0;
unsigned mod = ehci->periodic_size << 3;
struct ehci_iso_sched *sched = urb->hcpriv;
+ bool empty = list_empty(&stream->td_list);
+ bool new_stream = false;
- if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
- ehci_dbg (ehci, "iso request %p too long\n", urb);
- status = -EFBIG;
- goto fail;
- }
+ period = stream->uperiod;
+ span = sched->span;
+ if (!stream->highspeed)
+ span <<= 3;
- if ((stream->depth + sched->span) > mod) {
- ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
- urb, stream->depth, sched->span, mod);
- status = -EFBIG;
- goto fail;
- }
+ /* Start a new isochronous stream? */
+ if (unlikely(empty && !hcd_periodic_completion_in_progress(
+ ehci_to_hcd(ehci), urb->ep))) {
- now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
+ /* Schedule the endpoint */
+ if (stream->ps.phase == NO_FRAME) {
+ int done = 0;
+ struct ehci_tt *tt = find_tt(stream->ps.udev);
- /* when's the last uframe this urb could start? */
- max = now + mod;
+ if (IS_ERR(tt)) {
+ status = PTR_ERR(tt);
+ goto fail;
+ }
+ compute_tt_budget(ehci->tt_budget, tt);
- /* Typical case: reuse current schedule, stream is still active.
- * Hopefully there are no gaps from the host falling behind
- * (irq delays etc), but if there are we'll take the next
- * slot in the schedule, implicitly assuming URB_ISO_ASAP.
- */
- if (likely (!list_empty (&stream->td_list))) {
- start = stream->next_uframe;
- if (start < now)
- start += mod;
-
- /* Fell behind (by up to twice the slop amount)? */
- if (start >= max - 2 * 8 * SCHEDULE_SLOP)
- start += stream->interval * DIV_ROUND_UP(
- max - start, stream->interval) - mod;
-
- /* Tried to schedule too far into the future? */
- if (unlikely((start + sched->span) >= max)) {
- status = -EFBIG;
- goto fail;
+ start = ((-(++ehci->random_frame)) << 3) & (period - 1);
+
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
+ */
+ next = start;
+ start += period;
+ do {
+ start--;
+ /* check schedule: enough space? */
+ if (stream->highspeed) {
+ if (itd_slot_ok(ehci, stream, start))
+ done = 1;
+ } else {
+ if ((start % 8) >= 6)
+ continue;
+ if (sitd_slot_ok(ehci, stream, start,
+ sched, tt))
+ done = 1;
+ }
+ } while (start > next && !done);
+
+ /* no room in the schedule */
+ if (!done) {
+ ehci_dbg(ehci, "iso sched full %p", urb);
+ status = -ENOSPC;
+ goto fail;
+ }
+ stream->ps.phase = (start >> 3) &
+ (stream->ps.period - 1);
+ stream->ps.bw_phase = stream->ps.phase &
+ (stream->ps.bw_period - 1);
+ stream->ps.phase_uf = start & 7;
+ reserve_release_iso_bandwidth(ehci, stream, 1);
+ }
+
+ /* New stream is already scheduled; use the upcoming slot */
+ else {
+ start = (stream->ps.phase << 3) + stream->ps.phase_uf;
}
- goto ready;
+
+ stream->next_uframe = start;
+ new_stream = true;
}
- /* need to schedule; when's the next (u)frame we could start?
- * this is bigger than ehci->i_thresh allows; scheduling itself
- * isn't free, the slop should handle reasonably slow cpus. it
- * can also help high bandwidth if the dma and irq loads don't
- * jump until after the queue is primed.
+ now = ehci_read_frame_index(ehci) & (mod - 1);
+
+ /* Take the isochronous scheduling threshold into account */
+ if (ehci->i_thresh)
+ next = now + ehci->i_thresh; /* uframe cache */
+ else
+ next = (now + 2 + 7) & ~0x07; /* full frame cache */
+
+ /*
+ * Use ehci->last_iso_frame as the base. There can't be any
+ * TDs scheduled for earlier than that.
*/
- start = SCHEDULE_SLOP * 8 + (now & ~0x07);
- start %= mod;
- stream->next_uframe = start;
+ base = ehci->last_iso_frame << 3;
+ next = (next - base) & (mod - 1);
+ start = (stream->next_uframe - base) & (mod - 1);
- /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
+ if (unlikely(new_stream))
+ goto do_ASAP;
- period = urb->interval;
- if (!stream->highspeed)
- period <<= 3;
+ /*
+ * Typical case: reuse current schedule, stream may still be active.
+ * Hopefully there are no gaps from the host falling behind
+ * (irq delays etc). If there are, the behavior depends on
+ * whether URB_ISO_ASAP is set.
+ */
+ now2 = (now - base) & (mod - 1);
- /* find a uframe slot with enough bandwidth */
- for (; start < (stream->next_uframe + period); start++) {
- int enough_space;
+ /* Is the schedule already full? */
+ if (unlikely(!empty && start < period)) {
+ ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
+ urb, stream->next_uframe, base, period, mod);
+ status = -ENOSPC;
+ goto fail;
+ }
- /* check schedule: enough space? */
- if (stream->highspeed)
- enough_space = itd_slot_ok (ehci, mod, start,
- stream->usecs, period);
- else {
- if ((start % 8) >= 6)
- continue;
- enough_space = sitd_slot_ok (ehci, mod, stream,
- start, sched, period);
- }
+ /* Is the next packet scheduled after the base time? */
+ if (likely(!empty || start <= now2 + period)) {
- /* schedule it here if there's enough bandwidth */
- if (enough_space) {
- stream->next_uframe = start % mod;
- goto ready;
- }
+ /* URB_ISO_ASAP: make sure that start >= next */
+ if (unlikely(start < next &&
+ (urb->transfer_flags & URB_ISO_ASAP)))
+ goto do_ASAP;
+
+ /* Otherwise use start, if it's not in the past */
+ if (likely(start >= now2))
+ goto use_start;
+
+ /* Otherwise we got an underrun while the queue was empty */
+ } else {
+ if (urb->transfer_flags & URB_ISO_ASAP)
+ goto do_ASAP;
+ wrap = mod;
+ now2 += mod;
}
- /* no room in the schedule */
- ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
- list_empty (&stream->td_list) ? "" : "re",
- urb, now, max);
- status = -ENOSPC;
+ /* How many uframes and packets do we need to skip? */
+ skip = (now2 - start + period - 1) & -period;
+ if (skip >= span) { /* Entirely in the past? */
+ ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
+ urb, start + base, span - period, now2 + base,
+ base);
+
+ /* Try to keep the last TD intact for scanning later */
+ skip = span - period;
+
+ /* Will it come before the current scan position? */
+ if (empty) {
+ skip = span; /* Skip the entire URB */
+ status = 1; /* and give it back immediately */
+ iso_sched_free(stream, sched);
+ sched = NULL;
+ }
+ }
+ urb->error_count = skip / period;
+ if (sched)
+ sched->first_packet = urb->error_count;
+ goto use_start;
+
+ do_ASAP:
+ /* Use the first slot after "next" */
+ start = next + ((start - next) & (period - 1));
+
+ use_start:
+ /* Tried to schedule too far into the future? */
+ if (unlikely(start + span - period >= mod + wrap)) {
+ ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
+ urb, start, span - period, mod + wrap);
+ status = -EFBIG;
+ goto fail;
+ }
-fail:
- iso_sched_free (stream, sched);
- urb->hcpriv = NULL;
- return status;
+ start += base;
+ stream->next_uframe = (start + skip) & (mod - 1);
-ready:
/* report high speed start in uframes; full speed, in frames */
- urb->start_frame = stream->next_uframe;
+ urb->start_frame = start & (mod - 1);
if (!stream->highspeed)
urb->start_frame >>= 3;
- return 0;
+
+ /* Make sure scan_isoc() sees these */
+ if (ehci->isoc_count == 0)
+ ehci->last_iso_frame = now >> 3;
+ return status;
+
+ fail:
+ iso_sched_free(stream, sched);
+ urb->hcpriv = NULL;
+ return status;
}
/*-------------------------------------------------------------------------*/
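/*
 * Editorial note: iso_stream_schedule() now has a three-way result,
 * matching its callers further below -- 0 means the URB was scheduled and
 * should be linked, a positive value means every packet lies in the past
 * so the URB is completed immediately (with urb->error_count already set),
 * and a negative value is an ordinary error such as -ENOSPC or -EFBIG.
 */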
@@ -1485,18 +1738,31 @@ itd_patch(
static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
- /* always prepend ITD/SITD ... only QH tree is order-sensitive */
- itd->itd_next = ehci->pshadow [frame];
- itd->hw_next = ehci->periodic [frame];
- ehci->pshadow [frame].itd = itd;
+ union ehci_shadow *prev = &ehci->pshadow[frame];
+ __hc32 *hw_p = &ehci->periodic[frame];
+ union ehci_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip any iso nodes which might belong to previous microframes */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(ehci, *hw_p);
+ if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(ehci, prev, type);
+ hw_p = shadow_next_periodic(ehci, &here, type);
+ here = *prev;
+ }
+
+ itd->itd_next = here;
+ itd->hw_next = *hw_p;
+ prev->itd = itd;
itd->frame = frame;
wmb ();
- ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
+ *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
/* fit urb's itds into the selected schedule slot; activate as needed */
-static int
-itd_link_urb (
+static void itd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
@@ -1508,23 +1774,22 @@ itd_link_urb (
struct ehci_iso_sched *iso_sched = urb->hcpriv;
struct ehci_itd *itd;
- next_uframe = stream->next_uframe % mod;
+ next_uframe = stream->next_uframe & (mod - 1);
- if (unlikely (list_empty(&stream->td_list))) {
+ if (unlikely (list_empty(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
- ehci_vdbg (ehci,
- "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
- urb->dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- urb->interval,
- next_uframe >> 3, next_uframe & 0x7);
- stream->start = jiffies;
+
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_disable();
}
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill iTDs uframe by uframe */
- for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
+ for (packet = iso_sched->first_packet, itd = NULL;
+ packet < urb->number_of_packets;) {
if (itd == NULL) {
/* ASSERT: we have all necessary itds */
// BUG_ON (list_empty (&iso_sched->td_list));
@@ -1534,8 +1799,8 @@ itd_link_urb (
itd = list_entry (iso_sched->td_list.next,
struct ehci_itd, itd_list);
list_move_tail (&itd->itd_list, &stream->td_list);
- itd->stream = iso_stream_get (stream);
- itd->urb = usb_get_urb (urb);
+ itd->stream = stream;
+ itd->urb = urb;
itd_init (ehci, stream, itd);
}
@@ -1544,15 +1809,14 @@ itd_link_urb (
itd_patch(ehci, itd, iso_sched, packet, uframe);
- next_uframe += stream->interval;
- stream->depth += stream->interval;
- next_uframe %= mod;
+ next_uframe += stream->uperiod;
+ next_uframe &= mod - 1;
packet++;
/* link completed itds into the schedule */
if (((next_uframe >> 3) != frame)
|| packet == urb->number_of_packets) {
- itd_link (ehci, frame % ehci->periodic_size, itd);
+ itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
itd = NULL;
}
}
@@ -1560,10 +1824,10 @@ itd_link_urb (
/* don't need that schedule data any more */
iso_sched_free (stream, iso_sched);
- urb->hcpriv = NULL;
+ urb->hcpriv = stream;
- timer_action (ehci, TIMER_IO_WATCHDOG);
- return enable_periodic(ehci);
+ ++ehci->isoc_count;
+ enable_periodic(ehci);
}
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
@@ -1578,11 +1842,8 @@ itd_link_urb (
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
-static unsigned
-itd_complete (
- struct ehci_hcd *ehci,
- struct ehci_itd *itd
-) {
+static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
+{
struct urb *urb = itd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
@@ -1590,7 +1851,7 @@ itd_complete (
int urb_index = -1;
struct ehci_iso_stream *stream = itd->stream;
struct usb_device *dev;
- unsigned retval = false;
+ bool retval = false;
/* for each uframe with a packet */
for (uframe = 0; uframe < 8; uframe++) {
@@ -1601,7 +1862,6 @@ itd_complete (
t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
itd->hw_transaction [uframe] = 0;
- stream->depth -= stream->interval;
/* report transfer status */
if (unlikely (t & ISO_ERRS)) {
@@ -1616,14 +1876,17 @@ itd_complete (
desc->status = -EPROTO;
/* HC need not update length with this error */
- if (!(t & EHCI_ISOC_BABBLE))
- desc->actual_length = EHCI_ITD_LENGTH (t);
+ if (!(t & EHCI_ISOC_BABBLE)) {
+ desc->actual_length = EHCI_ITD_LENGTH(t);
+ urb->actual_length += desc->actual_length;
+ }
} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
desc->status = 0;
- desc->actual_length = EHCI_ITD_LENGTH (t);
+ desc->actual_length = EHCI_ITD_LENGTH(t);
+ urb->actual_length += desc->actual_length;
} else {
/* URB was too late */
- desc->status = -EXDEV;
+ urb->error_count++;
}
}
@@ -1641,25 +1904,32 @@ itd_complete (
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
- (void) disable_periodic(ehci);
+
+ --ehci->isoc_count;
+ disable_periodic(ehci);
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_enable();
+ }
- if (unlikely (list_empty (&stream->td_list))) {
+ if (unlikely(list_is_singular(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
- ehci_vdbg (ehci,
- "deschedule devp %s ep%d%s-iso\n",
- dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
- }
- iso_stream_put (ehci, stream);
- /* OK to recycle this ITD now that its completion callback ran. */
+
done:
- usb_put_urb(urb);
itd->urb = NULL;
- itd->stream = NULL;
- list_move(&itd->itd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&itd->itd_list, &stream->free_list);
+
+ /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &ehci->cached_itd_list);
+ start_free_itds(ehci);
+ }
return retval;
}
@@ -1679,9 +1949,9 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
- if (unlikely (urb->interval != stream->interval)) {
+ if (unlikely(urb->interval != stream->uperiod)) {
ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
- stream->interval, urb->interval);
+ stream->uperiod, urb->interval);
goto done;
}
@@ -1705,8 +1975,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
@@ -1714,16 +1983,17 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
- if (likely (status == 0))
+ if (likely(status == 0)) {
itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
- else
+ } else if (status > 0) {
+ status = 0;
+ ehci_urb_done(ehci, urb, 0);
+ } else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
-done_not_linked:
+ }
+ done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
-
-done:
- if (unlikely (status < 0))
- iso_stream_put (ehci, stream);
+ done:
return status;
}
@@ -1746,7 +2016,7 @@ sitd_sched_init(
dma_addr_t dma = urb->transfer_dma;
/* how many frames are needed for these transfers */
- iso_sched->span = urb->number_of_packets * stream->interval;
+ iso_sched->span = urb->number_of_packets * stream->ps.period;
/* figure out per-frame sitd fields that we'll need later
* when we fit new sitds into the schedule.
@@ -1812,17 +2082,19 @@ sitd_urb_transaction (
* means we never need two sitds for full speed packets.
*/
- /* free_list.next might be cache-hot ... but maybe
- * the HC caches it too. avoid that issue for now.
+ /*
+ * Use siTDs from the free list, but not siTDs that may
+ * still be in use by the hardware.
*/
-
- /* prefer previously-allocated sitds */
- if (!list_empty(&stream->free_list)) {
- sitd = list_entry (stream->free_list.prev,
+ if (likely(!list_empty(&stream->free_list))) {
+ sitd = list_first_entry(&stream->free_list,
struct ehci_sitd, sitd_list);
+ if (sitd->frame == ehci->now_frame)
+ goto alloc_sitd;
list_del (&sitd->sitd_list);
sitd_dma = sitd->sitd_dma;
} else {
+ alloc_sitd:
spin_unlock_irqrestore (&ehci->lock, flags);
sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
&sitd_dma);
@@ -1836,6 +2108,7 @@ sitd_urb_transaction (
memset (sitd, 0, sizeof *sitd);
sitd->sitd_dma = sitd_dma;
+ sitd->frame = NO_FRAME;
list_add (&sitd->sitd_list, &iso_sched->td_list);
}
@@ -1891,8 +2164,7 @@ sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
}
/* fit urb's sitds into the selected schedule slot; activate as needed */
-static int
-sitd_link_urb (
+static void sitd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
@@ -1906,22 +2178,20 @@ sitd_link_urb (
next_uframe = stream->next_uframe;
- if (list_empty(&stream->td_list)) {
+ if (list_empty(&stream->td_list))
/* usbfs ignores TT bandwidth */
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
- ehci_vdbg (ehci,
- "sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
- urb->dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- (next_uframe >> 3) % ehci->periodic_size,
- stream->interval, hc32_to_cpu(ehci, stream->splits));
- stream->start = jiffies;
+
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_disable();
}
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill sITDs frame by frame */
- for (packet = 0, sitd = NULL;
+ for (packet = sched->first_packet, sitd = NULL;
packet < urb->number_of_packets;
packet++) {
@@ -1933,24 +2203,23 @@ sitd_link_urb (
sitd = list_entry (sched->td_list.next,
struct ehci_sitd, sitd_list);
list_move_tail (&sitd->sitd_list, &stream->td_list);
- sitd->stream = iso_stream_get (stream);
- sitd->urb = usb_get_urb (urb);
+ sitd->stream = stream;
+ sitd->urb = urb;
sitd_patch(ehci, stream, sitd, sched, packet);
- sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
+ sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
sitd);
- next_uframe += stream->interval << 3;
- stream->depth += stream->interval << 3;
+ next_uframe += stream->uperiod;
}
- stream->next_uframe = next_uframe % mod;
+ stream->next_uframe = next_uframe & (mod - 1);
/* don't need that schedule data any more */
iso_sched_free (stream, sched);
- urb->hcpriv = NULL;
+ urb->hcpriv = stream;
- timer_action (ehci, TIMER_IO_WATCHDOG);
- return enable_periodic(ehci);
+ ++ehci->isoc_count;
+ enable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -1968,25 +2237,22 @@ sitd_link_urb (
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
-static unsigned
-sitd_complete (
- struct ehci_hcd *ehci,
- struct ehci_sitd *sitd
-) {
+static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
+{
struct urb *urb = sitd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
int urb_index = -1;
struct ehci_iso_stream *stream = sitd->stream;
struct usb_device *dev;
- unsigned retval = false;
+ bool retval = false;
urb_index = sitd->index;
desc = &urb->iso_frame_desc [urb_index];
t = hc32_to_cpup(ehci, &sitd->hw_results);
/* report transfer status */
- if (t & SITD_ERRS) {
+ if (unlikely(t & SITD_ERRS)) {
urb->error_count++;
if (t & SITD_STS_DBE)
desc->status = usb_pipein (urb->pipe)
@@ -1996,11 +2262,14 @@ sitd_complete (
desc->status = -EOVERFLOW;
else /* XACT, MMF, etc */
desc->status = -EPROTO;
+ } else if (unlikely(t & SITD_STS_ACTIVE)) {
+ /* URB was too late */
+ urb->error_count++;
} else {
desc->status = 0;
- desc->actual_length = desc->length - SITD_LENGTH (t);
+ desc->actual_length = desc->length - SITD_LENGTH(t);
+ urb->actual_length += desc->actual_length;
}
- stream->depth -= stream->interval << 3;
/* handle completion now? */
if ((urb_index + 1) != urb->number_of_packets)
@@ -2016,25 +2285,32 @@ sitd_complete (
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
- (void) disable_periodic(ehci);
+
+ --ehci->isoc_count;
+ disable_periodic(ehci);
+
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+ if (ehci->amd_pll_fix == 1)
+ usb_amd_quirk_pll_enable();
+ }
- if (list_empty (&stream->td_list)) {
+ if (list_is_singular(&stream->td_list))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
- ehci_vdbg (ehci,
- "deschedule devp %s ep%d%s-iso\n",
- dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
- }
- iso_stream_put (ehci, stream);
- /* OK to recycle this SITD now that its completion callback ran. */
+
done:
- usb_put_urb(urb);
sitd->urb = NULL;
- sitd->stream = NULL;
- list_move(&sitd->sitd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&sitd->sitd_list, &stream->free_list);
+
+ /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &ehci->cached_sitd_list);
+ start_free_itds(ehci);
+ }
return retval;
}
@@ -2053,9 +2329,9 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
- if (urb->interval != stream->interval) {
+ if (urb->interval != stream->ps.period) {
ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
- stream->interval, urb->interval);
+ stream->ps.period, urb->interval);
goto done;
}
@@ -2077,8 +2353,7 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
@@ -2086,48 +2361,47 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
- if (status == 0)
+ if (likely(status == 0)) {
sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
- else
+ } else if (status > 0) {
+ status = 0;
+ ehci_urb_done(ehci, urb, 0);
+ } else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
-done_not_linked:
+ }
+ done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
-
-done:
- if (status < 0)
- iso_stream_put (ehci, stream);
+ done:
return status;
}
/*-------------------------------------------------------------------------*/
-static void
-scan_periodic (struct ehci_hcd *ehci)
+static void scan_isoc(struct ehci_hcd *ehci)
{
- unsigned now_uframe, frame, clock, clock_frame, mod;
- unsigned modified;
-
- mod = ehci->periodic_size << 3;
+ unsigned uf, now_frame, frame;
+ unsigned fmask = ehci->periodic_size - 1;
+ bool modified, live;
/*
* When running, scan from last scan point up to "now"
* else clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
*/
- now_uframe = ehci->next_uframe;
- if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
- clock = ehci_readl(ehci, &ehci->regs->frame_index);
- else
- clock = now_uframe + mod - 1;
- clock %= mod;
- clock_frame = clock >> 3;
+ if (ehci->rh_state >= EHCI_RH_RUNNING) {
+ uf = ehci_read_frame_index(ehci);
+ now_frame = (uf >> 3) & fmask;
+ live = true;
+ } else {
+ now_frame = (ehci->last_iso_frame - 1) & fmask;
+ live = false;
+ }
+ ehci->now_frame = now_frame;
+ frame = ehci->last_iso_frame;
for (;;) {
union ehci_shadow q, *q_p;
__hc32 type, *hw_p;
- unsigned incomplete = false;
-
- frame = now_uframe >> 3;
restart:
/* scan each element in frame's queue for completions */
@@ -2135,42 +2409,17 @@ restart:
hw_p = &ehci->periodic [frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE(ehci, *hw_p);
- modified = 0;
+ modified = false;
while (q.ptr != NULL) {
- unsigned uf;
- union ehci_shadow temp;
- int live;
-
- live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
switch (hc32_to_cpu(ehci, type)) {
- case Q_TYPE_QH:
- /* handle any completions */
- temp.qh = qh_get (q.qh);
- type = Q_NEXT_TYPE(ehci, q.qh->hw_next);
- q = q.qh->qh_next;
- modified = qh_completions (ehci, temp.qh);
- if (unlikely (list_empty (&temp.qh->qtd_list)))
- intr_deschedule (ehci, temp.qh);
- qh_put (temp.qh);
- break;
- case Q_TYPE_FSTN:
- /* for "save place" FSTNs, look at QH entries
- * in the previous frame for completions.
- */
- if (q.fstn->hw_prev != EHCI_LIST_END(ehci)) {
- dbg ("ignoring completions from FSTNs");
- }
- type = Q_NEXT_TYPE(ehci, q.fstn->hw_next);
- q = q.fstn->fstn_next;
- break;
case Q_TYPE_ITD:
/* If this ITD is still active, leave it for
* later processing ... check the next entry.
* No need to check for activity unless the
* frame is current.
*/
- if (frame == clock_frame && live) {
+ if (frame == now_frame && live) {
rmb();
for (uf = 0; uf < 8; uf++) {
if (q.itd->hw_transaction[uf] &
@@ -2178,7 +2427,6 @@ restart:
break;
}
if (uf < 8) {
- incomplete = true;
q_p = &q.itd->itd_next;
hw_p = &q.itd->hw_next;
type = Q_NEXT_TYPE(ehci,
@@ -2194,7 +2442,12 @@ restart:
* pointer for much longer, if at all.
*/
*q_p = q.itd->itd_next;
- *hw_p = q.itd->hw_next;
+ if (!ehci->use_dummy_qh ||
+ q.itd->hw_next != EHCI_LIST_END(ehci))
+ *hw_p = q.itd->hw_next;
+ else
+ *hw_p = cpu_to_hc32(ehci,
+ ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
wmb();
modified = itd_complete (ehci, q.itd);
@@ -2206,10 +2459,12 @@ restart:
* No need to check for activity unless the
* frame is current.
*/
- if (frame == clock_frame && live &&
- (q.sitd->hw_results &
- SITD_ACTIVE(ehci))) {
- incomplete = true;
+ if (((frame == now_frame) ||
+ (((frame + 1) & fmask) == now_frame))
+ && live
+ && (q.sitd->hw_results &
+ SITD_ACTIVE(ehci))) {
+
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
type = Q_NEXT_TYPE(ehci,
@@ -2223,63 +2478,40 @@ restart:
* URB completion.
*/
*q_p = q.sitd->sitd_next;
- *hw_p = q.sitd->hw_next;
+ if (!ehci->use_dummy_qh ||
+ q.sitd->hw_next != EHCI_LIST_END(ehci))
+ *hw_p = q.sitd->hw_next;
+ else
+ *hw_p = cpu_to_hc32(ehci,
+ ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
wmb();
modified = sitd_complete (ehci, q.sitd);
q = *q_p;
break;
default:
- dbg ("corrupt type %d frame %d shadow %p",
+ ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
type, frame, q.ptr);
// BUG ();
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
q.ptr = NULL;
+ break;
}
/* assume completion callbacks modify the queue */
- if (unlikely (modified)) {
- if (likely(ehci->periodic_sched > 0))
- goto restart;
- /* short-circuit this scan */
- now_uframe = clock;
- break;
- }
+ if (unlikely(modified && ehci->isoc_count > 0))
+ goto restart;
}
- /* If we can tell we caught up to the hardware, stop now.
- * We can't advance our scan without collecting the ISO
- * transfers that are still pending in this frame.
- */
- if (incomplete && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
- ehci->next_uframe = now_uframe;
+ /* Stop when we have reached the current frame */
+ if (frame == now_frame)
break;
- }
-
- // FIXME: this assumes we won't get lapped when
- // latencies climb; that should be rare, but...
- // detect it, and just go all the way around.
- // FLR might help detect this case, so long as latencies
- // don't exceed periodic_size msec (default 1.024 sec).
-
- // FIXME: likewise assumes HC doesn't halt mid-scan
- if (now_uframe == clock) {
- unsigned now;
-
- if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
- || ehci->periodic_sched == 0)
- break;
- ehci->next_uframe = now_uframe;
- now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
- if (now_uframe == now)
- break;
-
- /* rescan the rest of this frame, then ... */
- clock = now;
- clock_frame = clock >> 3;
- } else {
- now_uframe++;
- now_uframe %= mod;
- }
+ /* The last frame may still have active siTDs */
+ ehci->last_iso_frame = frame;
+ frame = (frame + 1) & fmask;
}
}
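Editorial note: the rewritten loop above replaces the old microframe-by-microframe walk with a per-frame walk from ehci->last_iso_frame up to the frame the controller is currently processing. A minimal sketch of just that walk, with the completion handling elided, a hypothetical scan_frame() helper standing in for the ITD/siTD switch, and fmask assumed to be ehci->periodic_size - 1 as set up earlier in scan_isoc():

	unsigned fmask = ehci->periodic_size - 1;
	unsigned frame = ehci->last_iso_frame;
	unsigned now_frame = ehci->now_frame;

	for (;;) {
		scan_frame(ehci, frame, now_frame);	/* hypothetical helper */
		if (frame == now_frame)
			break;				/* caught up with the hardware */
		/* the frame just scanned may still hold active siTDs */
		ehci->last_iso_frame = frame;
		frame = (frame + 1) & fmask;
	}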
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
new file mode 100644
index 00000000000..cf126767386
--- /dev/null
+++ b/drivers/usb/host/ehci-sead3.c
@@ -0,0 +1,187 @@
+/*
+ * MIPS CI13320A EHCI Host Controller driver
+ * Based on "ehci-au1xxx.c" by K.Boge <karsten.boge@amd.com>
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+static int ehci_sead3_setup(struct usb_hcd *hcd)
+{
+ int ret;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ ehci->caps = hcd->regs + 0x100;
+
+#ifdef __BIG_ENDIAN
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+#endif
+
+ ret = ehci_setup(hcd);
+ if (ret)
+ return ret;
+
+ ehci->need_io_watchdog = 0;
+
+ /* Set burst length to 16 words. */
+ ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]);
+
+ return ret;
+}
+
+const struct hc_driver ehci_sead3_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "SEAD-3 EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ *
+ */
+ .reset = ehci_sead3_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ if (pdev->resource[1].flags != IORESOURCE_IRQ) {
+ pr_debug("resource[1] is not IORESOURCE_IRQ");
+ return -ENOMEM;
+ }
+ hcd = usb_create_hcd(&ehci_sead3_hc_driver, &pdev->dev, "SEAD-3");
+ if (!hcd)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto err1;
+ }
+
+ /* Root hub has integrated TT. */
+ hcd->has_tt = 1;
+
+ ret = usb_add_hcd(hcd, pdev->resource[1].start,
+ IRQF_SHARED);
+ if (ret == 0) {
+ platform_set_drvdata(pdev, hcd);
+ device_wakeup_enable(hcd->self.controller);
+ return ret;
+ }
+
+err1:
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static int ehci_hcd_sead3_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ehci_hcd_sead3_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ bool do_wakeup = device_may_wakeup(dev);
+
+ return ehci_suspend(hcd, do_wakeup);
+}
+
+static int ehci_hcd_sead3_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ ehci_resume(hcd, false);
+ return 0;
+}
+
+static const struct dev_pm_ops sead3_ehci_pmops = {
+ .suspend = ehci_hcd_sead3_drv_suspend,
+ .resume = ehci_hcd_sead3_drv_resume,
+};
+
+#define SEAD3_EHCI_PMOPS (&sead3_ehci_pmops)
+
+#else
+#define SEAD3_EHCI_PMOPS NULL
+#endif
+
+static struct platform_driver ehci_hcd_sead3_driver = {
+ .probe = ehci_hcd_sead3_drv_probe,
+ .remove = ehci_hcd_sead3_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "sead3-ehci",
+ .owner = THIS_MODULE,
+ .pm = SEAD3_EHCI_PMOPS,
+ }
+};
+
+MODULE_ALIAS("platform:sead3-ehci");
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
new file mode 100644
index 00000000000..9b9b9f5b016
--- /dev/null
+++ b/drivers/usb/host/ehci-sh.c
@@ -0,0 +1,206 @@
+/*
+ * SuperH EHCI host controller driver
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * Based on ohci-sh.c and ehci-atmel.c.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/platform_data/ehci-sh.h>
+
+struct ehci_sh_priv {
+ struct clk *iclk, *fclk;
+ struct usb_hcd *hcd;
+};
+
+static int ehci_sh_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ ehci->caps = hcd->regs;
+
+ return ehci_setup(hcd);
+}
+
+static const struct hc_driver ehci_sh_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "SuperH EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_sh_reset,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+
+#ifdef CONFIG_PM
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+#endif
+
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_sh_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct ehci_sh_priv *priv;
+ struct ehci_sh_platdata *pdata;
+ struct usb_hcd *hcd;
+ int irq, ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ ret = -ENODEV;
+ goto fail_create_hcd;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ ret = -ENODEV;
+ goto fail_create_hcd;
+ }
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ /* initialize hcd */
+ hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto fail_request_resource;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct ehci_sh_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ dev_dbg(&pdev->dev, "error allocating priv data\n");
+ ret = -ENOMEM;
+ goto fail_request_resource;
+ }
+
+ /* These are optional, we don't care if they fail */
+ priv->fclk = devm_clk_get(&pdev->dev, "usb_fck");
+ if (IS_ERR(priv->fclk))
+ priv->fclk = NULL;
+
+ priv->iclk = devm_clk_get(&pdev->dev, "usb_ick");
+ if (IS_ERR(priv->iclk))
+ priv->iclk = NULL;
+
+ clk_enable(priv->fclk);
+ clk_enable(priv->iclk);
+
+ if (pdata && pdata->phy_init)
+ pdata->phy_init();
+
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to add hcd");
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ priv->hcd = hcd;
+ platform_set_drvdata(pdev, priv);
+
+ return ret;
+
+fail_add_hcd:
+ clk_disable(priv->iclk);
+ clk_disable(priv->fclk);
+
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
+
+ return ret;
+}
+
+static int ehci_hcd_sh_remove(struct platform_device *pdev)
+{
+ struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = priv->hcd;
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ clk_disable(priv->fclk);
+ clk_disable(priv->iclk);
+
+ return 0;
+}
+
+static void ehci_hcd_sh_shutdown(struct platform_device *pdev)
+{
+ struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = priv->hcd;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_hcd_sh_driver = {
+ .probe = ehci_hcd_sh_probe,
+ .remove = ehci_hcd_sh_remove,
+ .shutdown = ehci_hcd_sh_shutdown,
+ .driver = {
+ .name = "sh_ehci",
+ .owner = THIS_MODULE,
+ },
+};
+
+MODULE_ALIAS("platform:sh_ehci");
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
new file mode 100644
index 00000000000..1d59958ad0c
--- /dev/null
+++ b/drivers/usb/host/ehci-spear.c
@@ -0,0 +1,195 @@
+/*
+* Driver for EHCI HCD on SPEAr SOC
+*
+* Copyright (C) 2010 ST Micro Electronics,
+* Deepak Sikri <deepak.sikri@st.com>
+*
+* Based on various ehci-*.c drivers
+*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file COPYING in the main directory of this archive for
+* more details.
+*/
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI SPEAr driver"
+
+static const char hcd_name[] = "SPEAr-ehci";
+
+struct spear_ehci {
+ struct clk *clk;
+};
+
+#define to_spear_ehci(hcd) (struct spear_ehci *)(hcd_to_ehci(hcd)->priv)
+
+static struct hc_driver __read_mostly ehci_spear_hc_driver;
+
+#ifdef CONFIG_PM_SLEEP
+static int ehci_spear_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ bool do_wakeup = device_may_wakeup(dev);
+
+ return ehci_suspend(hcd, do_wakeup);
+}
+
+static int ehci_spear_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ ehci_resume(hcd, false);
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend,
+ ehci_spear_drv_resume);
+
+static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct spear_ehci *sehci;
+ struct resource *res;
+ struct clk *usbh_clk;
+ const struct hc_driver *driver = &ehci_spear_hc_driver;
+ int irq, retval;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ retval = irq;
+ goto fail;
+ }
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto fail;
+
+ usbh_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usbh_clk)) {
+ dev_err(&pdev->dev, "Error getting interface clock\n");
+ retval = PTR_ERR(usbh_clk);
+ goto fail;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -ENODEV;
+ goto err_put_hcd;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err_put_hcd;
+ }
+
+ sehci = to_spear_ehci(hcd);
+ sehci->clk = usbh_clk;
+
+ /* registers start at offset 0x0 */
+ hcd_to_ehci(hcd)->caps = hcd->regs;
+
+ clk_prepare_enable(sehci->clk);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval)
+ goto err_stop_ehci;
+
+ device_wakeup_enable(hcd->self.controller);
+ return retval;
+
+err_stop_ehci:
+ clk_disable_unprepare(sehci->clk);
+err_put_hcd:
+ usb_put_hcd(hcd);
+fail:
+ dev_err(&pdev->dev, "init fail, %d\n", retval);
+
+ return retval;
+}
+
+static int spear_ehci_hcd_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct spear_ehci *sehci = to_spear_ehci(hcd);
+
+ usb_remove_hcd(hcd);
+
+ if (sehci->clk)
+ clk_disable_unprepare(sehci->clk);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct of_device_id spear_ehci_id_table[] = {
+ { .compatible = "st,spear600-ehci", },
+ { },
+};
+
+static struct platform_driver spear_ehci_hcd_driver = {
+ .probe = spear_ehci_hcd_drv_probe,
+ .remove = spear_ehci_hcd_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "spear-ehci",
+ .bus = &platform_bus_type,
+ .pm = &ehci_spear_pm_ops,
+ .of_match_table = spear_ehci_id_table,
+ }
+};
+
+static const struct ehci_driver_overrides spear_overrides __initdata = {
+ .extra_priv_size = sizeof(struct spear_ehci),
+};
+
+static int __init ehci_spear_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_spear_hc_driver, &spear_overrides);
+ return platform_driver_register(&spear_ehci_hcd_driver);
+}
+module_init(ehci_spear_init);
+
+static void __exit ehci_spear_cleanup(void)
+{
+ platform_driver_unregister(&spear_ehci_hcd_driver);
+}
+module_exit(ehci_spear_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:spear-ehci");
+MODULE_AUTHOR("Deepak Sikri");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
new file mode 100644
index 00000000000..f6459dfb6f5
--- /dev/null
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2007 by Alan Stern
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* this file is part of ehci-hcd.c */
+
+
+/* Display the ports dedicated to the companion controller */
+static ssize_t show_companion(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ehci_hcd *ehci;
+ int nports, index, n;
+ int count = PAGE_SIZE;
+ char *ptr = buf;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ nports = HCS_N_PORTS(ehci->hcs_params);
+
+ for (index = 0; index < nports; ++index) {
+ if (test_bit(index, &ehci->companion_ports)) {
+ n = scnprintf(ptr, count, "%d\n", index + 1);
+ ptr += n;
+ count -= n;
+ }
+ }
+ return ptr - buf;
+}
+
+/*
+ * Dedicate or undedicate a port to the companion controller.
+ * Syntax is "[-]portnum", where a leading '-' sign means
+ * return control of the port to the EHCI controller.
+ */
+static ssize_t store_companion(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ehci_hcd *ehci;
+ int portnum, new_owner;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ new_owner = PORT_OWNER; /* Owned by companion */
+ if (sscanf(buf, "%d", &portnum) != 1)
+ return -EINVAL;
+ if (portnum < 0) {
+ portnum = - portnum;
+ new_owner = 0; /* Owned by EHCI */
+ }
+ if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
+ return -ENOENT;
+ portnum--;
+ if (new_owner)
+ set_bit(portnum, &ehci->companion_ports);
+ else
+ clear_bit(portnum, &ehci->companion_ports);
+ set_owner(ehci, portnum, new_owner);
+ return count;
+}
+static DEVICE_ATTR(companion, 0644, show_companion, store_companion);
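Editorial note: the attribute accepts exactly the "[-]portnum" syntax described above. A hedged user-space sketch (the sysfs path shown is an assumption and depends on where the EHCI controller sits on the bus):

	#include <stdio.h>

	int main(void)
	{
		/* Path is illustrative only. */
		FILE *f = fopen("/sys/bus/pci/devices/0000:00:1d.7/companion", "w");

		if (!f)
			return 1;
		fputs("3\n", f);	/* hand port 3 to the companion controller */
		fclose(f);
		/* writing "-3" instead would return the port to EHCI */
		return 0;
	}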
+
+
+/*
+ * Display / Set uframe_periodic_max
+ */
+static ssize_t show_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ehci_hcd *ehci;
+ int n;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
+ return n;
+}
+
+
+static ssize_t store_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ehci_hcd *ehci;
+ unsigned uframe_periodic_max;
+ unsigned uframe;
+ unsigned long flags;
+ ssize_t ret;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ return -EINVAL;
+
+ if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+ ehci_info(ehci, "rejecting invalid request for "
+ "uframe_periodic_max=%u\n", uframe_periodic_max);
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+
+ /*
+ * lock, so that our checking does not race with possible periodic
+ * bandwidth allocation through submitting new urbs.
+ */
+ spin_lock_irqsave (&ehci->lock, flags);
+
+ /*
+ * for request to decrease max periodic bandwidth, we have to check
+ * to see whether the decrease is possible.
+ */
+ if (uframe_periodic_max < ehci->uframe_periodic_max) {
+ u8 allocated_max = 0;
+
+ for (uframe = 0; uframe < EHCI_BANDWIDTH_SIZE; ++uframe)
+ allocated_max = max(allocated_max,
+ ehci->bandwidth[uframe]);
+
+ if (allocated_max > uframe_periodic_max) {
+ ehci_info(ehci,
+ "cannot decrease uframe_periodic_max becase "
+ "periodic bandwidth is already allocated "
+ "(%u > %u)\n",
+ allocated_max, uframe_periodic_max);
+ goto out_unlock;
+ }
+ }
+
+ /* increasing is always ok */
+
+ ehci_info(ehci, "setting max periodic bandwidth to %u%% "
+ "(== %u usec/uframe)\n",
+ 100*uframe_periodic_max/125, uframe_periodic_max);
+
+ if (uframe_periodic_max != 100)
+ ehci_warn(ehci, "max periodic bandwidth set is non-standard\n");
+
+ ehci->uframe_periodic_max = uframe_periodic_max;
+ ret = count;
+
+out_unlock:
+ spin_unlock_irqrestore (&ehci->lock, flags);
+ return ret;
+}
+static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max);
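Editorial note, as a worked example of the accepted range: the value is microseconds of periodic bandwidth per 125 us microframe, so only writes between 100 and 124 pass the check above. Writing 110, for instance, makes the info message report 100*110/125 = 88%, and also triggers the "non-standard" warning, since only 100 (the EHCI-standard 80%) avoids it.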
+
+
+static inline int create_sysfs_files(struct ehci_hcd *ehci)
+{
+ struct device *controller = ehci_to_hcd(ehci)->self.controller;
+ int i = 0;
+
+ /* with integrated TT there is no companion! */
+ if (!ehci_is_TDI(ehci))
+ i = device_create_file(controller, &dev_attr_companion);
+ if (i)
+ goto out;
+
+ i = device_create_file(controller, &dev_attr_uframe_periodic_max);
+out:
+ return i;
+}
+
+static inline void remove_sysfs_files(struct ehci_hcd *ehci)
+{
+ struct device *controller = ehci_to_hcd(ehci)->self.controller;
+
+ /* with integrated TT there is no companion! */
+ if (!ehci_is_TDI(ehci))
+ device_remove_file(controller, &dev_attr_companion);
+
+ device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
new file mode 100644
index 00000000000..6fdcb8ad229
--- /dev/null
+++ b/drivers/usb/host/ehci-tegra.c
@@ -0,0 +1,569 @@
+/*
+ * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2009 - 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/usb/ehci_def.h>
+#include <linux/usb/tegra_usb_phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include "ehci.h"
+
+#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+
+#define TEGRA_USB_DMA_ALIGN 32
+
+#define DRIVER_DESC "Tegra EHCI driver"
+#define DRV_NAME "tegra-ehci"
+
+static struct hc_driver __read_mostly tegra_ehci_hc_driver;
+
+struct tegra_ehci_soc_config {
+ bool has_hostpc;
+};
+
+struct tegra_ehci_hcd {
+ struct tegra_usb_phy *phy;
+ struct clk *clk;
+ struct reset_control *rst;
+ int port_resuming;
+ bool needs_double_reset;
+ enum tegra_usb_phy_port_speed port_speed;
+};
+
+static int tegra_ehci_internal_port_reset(
+ struct ehci_hcd *ehci,
+ u32 __iomem *portsc_reg
+)
+{
+ u32 temp;
+ unsigned long flags;
+ int retval = 0;
+ int i, tries;
+ u32 saved_usbintr;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+ saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable);
+ /* disable USB interrupt */
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /*
+ * Here we have to do Port Reset at most twice for
+ * Port Enable bit to be set.
+ */
+ for (i = 0; i < 2; i++) {
+ temp = ehci_readl(ehci, portsc_reg);
+ temp |= PORT_RESET;
+ ehci_writel(ehci, temp, portsc_reg);
+ mdelay(10);
+ temp &= ~PORT_RESET;
+ ehci_writel(ehci, temp, portsc_reg);
+ mdelay(1);
+ tries = 100;
+ do {
+ mdelay(1);
+ /*
+ * By this point the Port Enable bit is
+ * expected to be set after about 2 ms of waiting.
+ * USB1 usually needs an extra 45 ms, so for
+ * safety we use a 100 ms timeout.
+ */
+ temp = ehci_readl(ehci, portsc_reg);
+ } while (!(temp & PORT_PE) && tries--);
+ if (temp & PORT_PE)
+ break;
+ }
+ if (i == 2)
+ retval = -ETIMEDOUT;
+
+ /*
+ * Clear Connect Status Change bit if it's set.
+ * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared.
+ */
+ if (temp & PORT_CSC)
+ ehci_writel(ehci, PORT_CSC, portsc_reg);
+
+ /*
+ * Write to clear any interrupt status bits that might be set
+ * during port reset.
+ */
+ temp = ehci_readl(ehci, &ehci->regs->status);
+ ehci_writel(ehci, temp, &ehci->regs->status);
+
+ /* restore original interrupt enable bits */
+ ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable);
+ return retval;
+}
+
+static int tegra_ehci_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd *)ehci->priv;
+ u32 __iomem *status_reg;
+ u32 temp;
+ unsigned long flags;
+ int retval = 0;
+
+ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ if (typeReq == GetPortStatus) {
+ temp = ehci_readl(ehci, status_reg);
+ if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
+ /* Resume completed, re-enable disconnect detection */
+ tegra->port_resuming = 0;
+ tegra_usb_phy_postresume(hcd->phy);
+ }
+ }
+
+ else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E);
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+ /*
+ * If a transaction is in progress, there may be a delay in
+ * suspending the port. Poll until the port is suspended.
+ */
+ if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
+ PORT_SUSPEND, 5000))
+ pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+
+ set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+ goto done;
+ }
+
+ /* For USB1 port we need to issue Port Reset twice internally */
+ if (tegra->needs_double_reset &&
+ (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return tegra_ehci_internal_port_reset(ehci, status_reg);
+ }
+
+ /*
+ * The Tegra host controller times the resume operation itself and clears
+ * the bit when the port control state switches to HS or FS Idle. This
+ * differs from standard EHCI, where the host controller driver is required
+ * to clear the bit after the resume duration has been timed in the
+ * driver.
+ */
+ else if (typeReq == ClearPortFeature &&
+ wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_RESET) || !(temp & PORT_PE)) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ if (!(temp & PORT_SUSPEND))
+ goto done;
+
+ /* Disable disconnect detection during port resume */
+ tegra_usb_phy_preresume(hcd->phy);
+
+ ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
+
+ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ /* start resume signalling */
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ set_bit(wIndex-1, &ehci->resuming_ports);
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ /* Poll until the controller clears RESUME and SUSPEND */
+ if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
+ pr_err("%s: timeout waiting for RESUME\n", __func__);
+ if (ehci_handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
+ pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+
+ ehci->reset_done[wIndex-1] = 0;
+ clear_bit(wIndex-1, &ehci->resuming_ports);
+
+ tegra->port_resuming = 1;
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /* Handle the hub control events here */
+ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+
+done:
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return retval;
+}
+
+struct dma_aligned_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[0];
+};
+
+static void free_dma_aligned_buffer(struct urb *urb)
+{
+ struct dma_aligned_buffer *temp;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ temp = container_of(urb->transfer_buffer,
+ struct dma_aligned_buffer, data);
+
+ if (usb_urb_dir_in(urb))
+ memcpy(temp->old_xfer_buffer, temp->data,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ struct dma_aligned_buffer *temp, *kmalloc_ptr;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg ||
+ urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1)))
+ return 0;
+
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct dma_aligned_buffer) + TEGRA_USB_DMA_ALIGN - 1;
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+ /* Position our struct dma_aligned_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1;
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
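Editorial note: the PTR_ALIGN() arithmetic above places the flexible data[] array on a TEGRA_USB_DMA_ALIGN boundary while keeping the two bookkeeping pointers immediately below it. A worked example, assuming a 64-bit build where sizeof(struct dma_aligned_buffer) is 16 bytes and kmalloc() returned 0x1008:

	/* kmalloc_ptr + 1 advances by sizeof(*temp):	0x1008 + 16 = 0x1018
	 * PTR_ALIGN(0x1018, 32) rounds up to:		0x1020
	 * subtracting one struct gives temp:		0x1020 - 16 = 0x1010
	 * so temp->data starts at 0x1020, which is 32-byte aligned, and the
	 * padding consumed by the rounding never exceeds TEGRA_USB_DMA_ALIGN - 1.
	 */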
+
+static int tegra_ehci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = alloc_dma_aligned_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ free_dma_aligned_buffer(urb);
+
+ return ret;
+}
+
+static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ free_dma_aligned_buffer(urb);
+}
+
+static const struct tegra_ehci_soc_config tegra30_soc_config = {
+ .has_hostpc = true,
+};
+
+static const struct tegra_ehci_soc_config tegra20_soc_config = {
+ .has_hostpc = false,
+};
+
+static struct of_device_id tegra_ehci_of_match[] = {
+ { .compatible = "nvidia,tegra30-ehci", .data = &tegra30_soc_config },
+ { .compatible = "nvidia,tegra20-ehci", .data = &tegra20_soc_config },
+ { },
+};
+
+static int tegra_ehci_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct tegra_ehci_soc_config *soc_config;
+ struct resource *res;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct tegra_ehci_hcd *tegra;
+ int err = 0;
+ int irq;
+ struct usb_phy *u_phy;
+
+ match = of_match_device(tegra_ehci_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ soc_config = match->data;
+
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, hcd);
+ ehci = hcd_to_ehci(hcd);
+ tegra = (struct tegra_ehci_hcd *)ehci->priv;
+
+ hcd->has_tt = 1;
+
+ tegra->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tegra->clk)) {
+ dev_err(&pdev->dev, "Can't get ehci clock\n");
+ err = PTR_ERR(tegra->clk);
+ goto cleanup_hcd_create;
+ }
+
+ tegra->rst = devm_reset_control_get(&pdev->dev, "usb");
+ if (IS_ERR(tegra->rst)) {
+ dev_err(&pdev->dev, "Can't get ehci reset\n");
+ err = PTR_ERR(tegra->rst);
+ goto cleanup_hcd_create;
+ }
+
+ err = clk_prepare_enable(tegra->clk);
+ if (err)
+ goto cleanup_hcd_create;
+
+ reset_control_assert(tegra->rst);
+ udelay(1);
+ reset_control_deassert(tegra->rst);
+
+ u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
+ if (IS_ERR(u_phy)) {
+ err = PTR_ERR(u_phy);
+ goto cleanup_clk_en;
+ }
+ hcd->phy = u_phy;
+
+ tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node,
+ "nvidia,needs-double-reset");
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto cleanup_clk_en;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto cleanup_clk_en;
+ }
+ ehci->caps = hcd->regs + 0x100;
+ ehci->has_hostpc = soc_config->has_hostpc;
+
+ err = usb_phy_init(hcd->phy);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize phy\n");
+ goto cleanup_clk_en;
+ }
+
+ u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
+ GFP_KERNEL);
+ if (!u_phy->otg) {
+ dev_err(&pdev->dev, "Failed to alloc memory for otg\n");
+ err = -ENOMEM;
+ goto cleanup_phy;
+ }
+ u_phy->otg->host = hcd_to_bus(hcd);
+
+ err = usb_phy_set_suspend(hcd->phy, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to power on the phy\n");
+ goto cleanup_phy;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto cleanup_phy;
+ }
+
+ otg_set_host(u_phy->otg, &hcd->self);
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto cleanup_otg_set_host;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ return err;
+
+cleanup_otg_set_host:
+ otg_set_host(u_phy->otg, NULL);
+cleanup_phy:
+ usb_phy_shutdown(hcd->phy);
+cleanup_clk_en:
+ clk_disable_unprepare(tegra->clk);
+cleanup_hcd_create:
+ usb_put_hcd(hcd);
+ return err;
+}
+
+static int tegra_ehci_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct tegra_ehci_hcd *tegra =
+ (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+
+ otg_set_host(hcd->phy->otg, NULL);
+
+ usb_phy_shutdown(hcd->phy);
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+
+ clk_disable_unprepare(tegra->clk);
+
+ return 0;
+}
+
+static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver tegra_ehci_driver = {
+ .probe = tegra_ehci_probe,
+ .remove = tegra_ehci_remove,
+ .shutdown = tegra_ehci_hcd_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = tegra_ehci_of_match,
+ }
+};
+
+static int tegra_ehci_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+ int txfifothresh;
+
+ retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ /*
+ * We should really pull this value out of tegra_ehci_soc_config, but
+ * to avoid needing access to it, make use of the fact that Tegra20 is
+ * the only one so far that needs a value of 10, and Tegra20 is the
+ * only one which doesn't set has_hostpc.
+ */
+ txfifothresh = ehci->has_hostpc ? 0x10 : 10;
+ ehci_writel(ehci, txfifothresh << 16, &ehci->regs->txfill_tuning);
+
+ return 0;
+}
+
+static const struct ehci_driver_overrides tegra_overrides __initconst = {
+ .extra_priv_size = sizeof(struct tegra_ehci_hcd),
+ .reset = tegra_ehci_reset,
+};
+
+static int __init ehci_tegra_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info(DRV_NAME ": " DRIVER_DESC "\n");
+
+ ehci_init_driver(&tegra_ehci_hc_driver, &tegra_overrides);
+
+ /*
+ * The Tegra HW has some unusual quirks, which require Tegra-specific
+ * workarounds. We override certain hc_driver functions here to
+ * achieve that. We explicitly do not enhance ehci_driver_overrides to
+ * allow this more easily, since this is an unusual case, and we don't
+ * want to encourage others to override these functions by making it
+ * too easy.
+ */
+
+ tegra_ehci_hc_driver.map_urb_for_dma = tegra_ehci_map_urb_for_dma;
+ tegra_ehci_hc_driver.unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma;
+ tegra_ehci_hc_driver.hub_control = tegra_ehci_hub_control;
+
+ return platform_driver_register(&tegra_ehci_driver);
+}
+module_init(ehci_tegra_init);
+
+static void __exit ehci_tegra_cleanup(void)
+{
+ platform_driver_unregister(&tegra_ehci_driver);
+}
+module_exit(ehci_tegra_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, tegra_ehci_of_match);
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
new file mode 100644
index 00000000000..0d247673c3c
--- /dev/null
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Tilera TILE-Gx USB EHCI host controller driver.
+ */
+
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/usb/tilegx.h>
+#include <linux/usb.h>
+
+#include <asm/homecache.h>
+
+#include <gxio/iorpc_usb_host.h>
+#include <gxio/usb_host.h>
+
+static void tilegx_start_ehc(void)
+{
+}
+
+static void tilegx_stop_ehc(void)
+{
+}
+
+static int tilegx_ehci_setup(struct usb_hcd *hcd)
+{
+ int ret = ehci_init(hcd);
+
+ /*
+ * Some drivers do:
+ *
+ * struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ * ehci->need_io_watchdog = 0;
+ *
+ * here, but since this is a new driver we're going to leave the
+ * watchdog enabled. Later we may try to turn it off and see
+ * whether we run into any problems.
+ */
+
+ return ret;
+}
+
+static const struct hc_driver ehci_tilegx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tile-Gx EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * Generic hardware linkage.
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * Basic lifecycle operations.
+ */
+ .reset = tilegx_ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * Managing I/O requests and associated device resources.
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * Scheduling support.
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * Root hub support.
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ pte_t pte = { 0 };
+ int my_cpu = smp_processor_id();
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Try to initialize our GXIO context; if we can't, the device
+ * doesn't exist.
+ */
+ if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 1) != 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_hcd;
+ }
+
+ /*
+ * We don't use rsrc_start to map in our registers, but it seems like
+ * we ought to set it to something, so we use the register VA.
+ */
+ hcd->rsrc_start =
+ (ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+ hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
+ hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+
+ tilegx_start_ehc();
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs;
+ ehci->regs =
+ hcd->regs + HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = readl(&ehci->caps->hcs_params);
+
+ /* Create our IRQs and register them. */
+ pdata->irq = irq_alloc_hwirq(-1);
+ if (!pdata->irq) {
+ ret = -ENXIO;
+ goto err_no_irq;
+ }
+
+ tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);
+
+ /* Configure interrupts. */
+ ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
+ cpu_x(my_cpu), cpu_y(my_cpu),
+ KERNEL_PL, pdata->irq);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ /* Register all of our memory. */
+ pte = pte_set_home(pte, PAGE_HOME_HASH);
+ ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
+ if (ret == 0) {
+ platform_set_drvdata(pdev, hcd);
+ device_wakeup_enable(hcd->self.controller);
+ return ret;
+ }
+
+err_have_irq:
+ irq_free_hwirq(pdata->irq);
+err_no_irq:
+ tilegx_stop_ehc();
+ usb_put_hcd(hcd);
+err_hcd:
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ return ret;
+}
+
+static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ tilegx_stop_ehc();
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ irq_free_hwirq(pdata->irq);
+
+ return 0;
+}
+
+static void ehci_hcd_tilegx_drv_shutdown(struct platform_device *pdev)
+{
+ usb_hcd_platform_shutdown(pdev);
+ ehci_hcd_tilegx_drv_remove(pdev);
+}
+
+static struct platform_driver ehci_hcd_tilegx_driver = {
+ .probe = ehci_hcd_tilegx_drv_probe,
+ .remove = ehci_hcd_tilegx_drv_remove,
+ .shutdown = ehci_hcd_tilegx_drv_shutdown,
+ .driver = {
+ .name = "tilegx-ehci",
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:tilegx-ehci");
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
new file mode 100644
index 00000000000..424ac5d8371
--- /dev/null
+++ b/drivers/usb/host/ehci-timer.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2012 by Alan Stern
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/* This file is part of ehci-hcd.c */
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
+{
+ ehci->command |= bit;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+ /* unblock posted write */
+ ehci_readl(ehci, &ehci->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
+{
+ ehci->command &= ~bit;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+ /* unblock posted write */
+ ehci_readl(ehci, &ehci->regs->command);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support... Now using hrtimers.
+ *
+ * Lots of different events are triggered from ehci->hrtimer. Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * ehci->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * ehci->next_hrtimer_event. Whenever ehci->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay. This doesn't
+ * matter, because none of the events are especially time-critical. The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay. In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum ehci_hrtimer_event in ehci.h.
+ */
+static unsigned event_delays_ns[] = {
+ 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_ASS */
+ 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_PSS */
+ 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_DEAD */
+ 1125 * NSEC_PER_USEC, /* EHCI_HRTIMER_UNLINK_INTR */
+ 2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_FREE_ITDS */
+ 5 * NSEC_PER_MSEC, /* EHCI_HRTIMER_START_UNLINK_INTR */
+ 6 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ASYNC_UNLINKS */
+ 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IAA_WATCHDOG */
+ 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_PERIODIC */
+ 15 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_ASYNC */
+ 100 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
+ bool resched)
+{
+ ktime_t *timeout = &ehci->hr_timeouts[event];
+
+ if (resched)
+ *timeout = ktime_add(ktime_get(),
+ ktime_set(0, event_delays_ns[event]));
+ ehci->enabled_hrtimer_events |= (1 << event);
+
+ /* Track only the lowest-numbered pending event */
+ if (event < ehci->next_hrtimer_event) {
+ ehci->next_hrtimer_event = event;
+ hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
+ NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+ }
+}
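Editorial note, to make the "lowest-numbered pending event" scheme concrete: if EHCI_HRTIMER_IO_WATCHDOG (100 ms) is already pending and EHCI_HRTIMER_UNLINK_INTR (1.125 ms) is then enabled, the unlink event has the smaller number, so next_hrtimer_event is updated and the hrtimer is re-armed for the 1.125 ms deadline. When it fires, ehci_hrtimer_func() below handles the unlink event but finds the watchdog's timeout still in the future, so it simply re-enables the watchdog without rescheduling its deadline.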
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void ehci_poll_ASS(struct ehci_hcd *ehci)
+{
+ unsigned actual, want;
+
+ /* Don't enable anything if the controller isn't running (e.g., died) */
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ return;
+
+ want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
+ actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 2-4 ms */
+ if (ehci->ASS_poll_count++ < 2) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
+ return;
+ }
+ ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ ehci->ASS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (ehci->async_count > 0)
+ ehci_set_command_bit(ehci, CMD_ASE);
+
+ } else { /* Running */
+ if (ehci->async_count == 0) {
+
+ /* Turn off the schedule after a while */
+ ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
+ true);
+ }
+ }
+}
+
+/* Turn off the async schedule after a brief delay */
+static void ehci_disable_ASE(struct ehci_hcd *ehci)
+{
+ ehci_clear_command_bit(ehci, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void ehci_poll_PSS(struct ehci_hcd *ehci)
+{
+ unsigned actual, want;
+
+ /* Don't do anything if the controller isn't running (e.g., died) */
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ return;
+
+ want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
+ actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 2-4 ms */
+ if (ehci->PSS_poll_count++ < 2) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
+ return;
+ }
+ ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ ehci->PSS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (ehci->periodic_count > 0)
+ ehci_set_command_bit(ehci, CMD_PSE);
+
+ } else { /* Running */
+ if (ehci->periodic_count == 0) {
+
+ /* Turn off the schedule after a while */
+ ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
+ true);
+ }
+ }
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void ehci_disable_PSE(struct ehci_hcd *ehci)
+{
+ ehci_clear_command_bit(ehci, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void ehci_handle_controller_death(struct ehci_hcd *ehci)
+{
+ if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {
+
+ /* Give up after a few milliseconds */
+ if (ehci->died_poll_count++ < 5) {
+ /* Try again later */
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
+ return;
+ }
+ ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
+ }
+
+ /* Clean up the mess */
+ ehci->rh_state = EHCI_RH_HALTED;
+ ehci_writel(ehci, 0, &ehci->regs->configured_flag);
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ ehci_work(ehci);
+ end_unlink_async(ehci);
+
+ /* Not in process context, so don't try to reset the controller */
+}
+
+/* start to unlink interrupt QHs */
+static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
+{
+ bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ while (!list_empty(&ehci->intr_unlink_wait)) {
+ struct ehci_qh *qh;
+
+ qh = list_first_entry(&ehci->intr_unlink_wait,
+ struct ehci_qh, unlink_node);
+ if (!stopped && (qh->unlink_cycle ==
+ ehci->intr_unlink_wait_cycle))
+ break;
+ list_del_init(&qh->unlink_node);
+ start_unlink_intr(ehci, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (!list_empty(&ehci->intr_unlink_wait)) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
+ ++ehci->intr_unlink_wait_cycle;
+ }
+}
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
+{
+ bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ ehci->intr_unlinking = true;
+ while (!list_empty(&ehci->intr_unlink)) {
+ struct ehci_qh *qh;
+
+ qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
+ unlink_node);
+ if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
+ break;
+ list_del_init(&qh->unlink_node);
+ end_unlink_intr(ehci, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (!list_empty(&ehci->intr_unlink)) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+ ++ehci->intr_unlink_cycle;
+ }
+ ehci->intr_unlinking = false;
+}
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct ehci_hcd *ehci)
+{
+ if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
+ ehci->last_itd_to_free = list_entry(
+ ehci->cached_itd_list.prev,
+ struct ehci_itd, itd_list);
+ ehci->last_sitd_to_free = list_entry(
+ ehci->cached_sitd_list.prev,
+ struct ehci_sitd, sitd_list);
+ ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
+ }
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct ehci_hcd *ehci)
+{
+ struct ehci_itd *itd, *n;
+ struct ehci_sitd *sitd, *sn;
+
+ if (ehci->rh_state < EHCI_RH_RUNNING) {
+ ehci->last_itd_to_free = NULL;
+ ehci->last_sitd_to_free = NULL;
+ }
+
+ list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
+ list_del(&itd->itd_list);
+ dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
+ if (itd == ehci->last_itd_to_free)
+ break;
+ }
+ list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
+ list_del(&sitd->sitd_list);
+ dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
+ if (sitd == ehci->last_sitd_to_free)
+ break;
+ }
+
+ if (!list_empty(&ehci->cached_itd_list) ||
+ !list_empty(&ehci->cached_sitd_list))
+ start_free_itds(ehci);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
+{
+ u32 cmd, status;
+
+ /*
+ * Lost IAA irqs wedge things badly; seen first with a vt8235.
+ * So we need this watchdog, but must protect it against both
+ * (a) SMP races against real IAA firing and retriggering, and
+ * (b) clean HC shutdown, when IAA watchdog was pending.
+ */
+ if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
+ return;
+
+ /* If we get here, IAA is *REALLY* late. It's barely
+ * conceivable that the system is so busy that CMD_IAAD
+ * is still legitimately set, so let's be sure it's
+ * clear before we read STS_IAA. (The HC should clear
+ * CMD_IAAD when it sets STS_IAA.)
+ */
+ cmd = ehci_readl(ehci, &ehci->regs->command);
+
+ /*
+ * If IAA is set here it either legitimately triggered
+ * after the watchdog timer expired (_way_ late, so we'll
+ * still count it as lost) ... or a silicon erratum:
+ * - VIA seems to set IAA without triggering the IRQ;
+ * - IAAD potentially cleared without setting IAA.
+ */
+ status = ehci_readl(ehci, &ehci->regs->status);
+ if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+ COUNT(ehci->stats.lost_iaa);
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+ }
+
+ ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
+ end_unlink_async(ehci);
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct ehci_hcd *ehci)
+{
+ /* Not needed if the controller isn't running or it's already enabled */
+ if (ehci->rh_state != EHCI_RH_RUNNING ||
+ (ehci->enabled_hrtimer_events &
+ BIT(EHCI_HRTIMER_IO_WATCHDOG)))
+ return;
+
+ /*
+ * Isochronous transfers always need the watchdog.
+ * For other sorts we use it only if the flag is set.
+ */
+ if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
+ ehci->async_count + ehci->intr_count > 0))
+ ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum ehci_hrtimer_event in ehci.h.
+ */
+static void (*event_handlers[])(struct ehci_hcd *) = {
+ ehci_poll_ASS, /* EHCI_HRTIMER_POLL_ASS */
+ ehci_poll_PSS, /* EHCI_HRTIMER_POLL_PSS */
+ ehci_handle_controller_death, /* EHCI_HRTIMER_POLL_DEAD */
+ ehci_handle_intr_unlinks, /* EHCI_HRTIMER_UNLINK_INTR */
+ end_free_itds, /* EHCI_HRTIMER_FREE_ITDS */
+ ehci_handle_start_intr_unlinks, /* EHCI_HRTIMER_START_UNLINK_INTR */
+ unlink_empty_async, /* EHCI_HRTIMER_ASYNC_UNLINKS */
+ ehci_iaa_watchdog, /* EHCI_HRTIMER_IAA_WATCHDOG */
+ ehci_disable_PSE, /* EHCI_HRTIMER_DISABLE_PERIODIC */
+ ehci_disable_ASE, /* EHCI_HRTIMER_DISABLE_ASYNC */
+ ehci_work, /* EHCI_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
+{
+ struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer);
+ ktime_t now;
+ unsigned long events;
+ unsigned long flags;
+ unsigned e;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ events = ehci->enabled_hrtimer_events;
+ ehci->enabled_hrtimer_events = 0;
+ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
+
+ /*
+ * Check each pending event. If its time has expired, handle
+ * the event; otherwise re-enable it.
+ */
+ now = ktime_get();
+ for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
+ if (now.tv64 >= ehci->hr_timeouts[e].tv64)
+ event_handlers[e](ehci);
+ else
+ ehci_enable_event(ehci, e, false);
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return HRTIMER_NORESTART;
+}
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
new file mode 100644
index 00000000000..a9303aff125
--- /dev/null
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -0,0 +1,154 @@
+/*
+ * linux/drivers/usb/host/ehci-w90x900.c
+ *
+ * Copyright (c) 2008 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+/* enable phy0 and phy1 for w90p910 */
+#define ENPHY (0x01<<8)
+#define PHY0_CTR (0xA4)
+#define PHY1_CTR (0xA8)
+
+#define DRIVER_DESC "EHCI w90x900 driver"
+
+static const char hcd_name[] = "ehci-w90x900 ";
+
+static struct hc_driver __read_mostly ehci_w90x900_hc_driver;
+
+static int usb_w90x900_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct resource *res;
+ int retval = 0, irq;
+ unsigned long val;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -ENXIO;
+ goto err1;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI");
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err2;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs;
+ ehci->regs = hcd->regs +
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+	/* Enable PHY 0 and PHY 1. These registers only apply to the w90p910:
+	 * 0xA4 and 0xA8 are the offsets of the PHY0 and PHY1 controllers of
+	 * the w90p910 IC relative to ehci->regs.
+	 */
+ val = __raw_readl(ehci->regs+PHY0_CTR);
+ val |= ENPHY;
+ __raw_writel(val, ehci->regs+PHY0_CTR);
+
+ val = __raw_readl(ehci->regs+PHY1_CTR);
+ val |= ENPHY;
+ __raw_writel(val, ehci->regs+PHY1_CTR);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		retval = irq;
+		goto err2;
+	}
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval != 0)
+ goto err2;
+
+ device_wakeup_enable(hcd->self.controller);
+ return retval;
+err2:
+ usb_put_hcd(hcd);
+err1:
+ return retval;
+}
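
The probe routine above enables the on-chip PHYs with a read-modify-write of the PHY control registers: read the current value, OR in the ENPHY bit, write it back. A simplified sketch of that pattern, using an ordinary array in place of the ioremap()ed register window (the offsets and bit value are taken from the driver, everything else is invented), is shown below:

#include <stdint.h>
#include <stdio.h>

#define ENPHY     (0x01 << 8)	/* enable bit, as in the driver */
#define PHY0_CTR  0xA4		/* register offsets (byte addresses) */
#define PHY1_CTR  0xA8

/* Stand-in for the memory-mapped register window. */
static uint32_t regs[0x100 / 4];

static uint32_t reg_read(unsigned off)            { return regs[off / 4]; }
static void reg_write(unsigned off, uint32_t val) { regs[off / 4] = val; }

/* Set a bit without disturbing the other bits in the register. */
static void reg_set_bits(unsigned off, uint32_t bits)
{
	reg_write(off, reg_read(off) | bits);
}

int main(void)
{
	reg_set_bits(PHY0_CTR, ENPHY);
	reg_set_bits(PHY1_CTR, ENPHY);
	printf("PHY0_CTR=0x%x PHY1_CTR=0x%x\n",
	       reg_read(PHY0_CTR), reg_read(PHY1_CTR));
	return 0;
}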
+
+static void usb_w90x900_remove(struct usb_hcd *hcd,
+ struct platform_device *pdev)
+{
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+}
+
+static int ehci_w90x900_probe(struct platform_device *pdev)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev);
+}
+
+static int ehci_w90x900_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_w90x900_remove(hcd, pdev);
+
+ return 0;
+}
+
+static struct platform_driver ehci_hcd_w90x900_driver = {
+ .probe = ehci_w90x900_probe,
+ .remove = ehci_w90x900_remove,
+ .driver = {
+ .name = "w90x900-ehci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ehci_w90X900_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+ ehci_init_driver(&ehci_w90x900_hc_driver, NULL);
+ return platform_driver_register(&ehci_hcd_w90x900_driver);
+}
+module_init(ehci_w90X900_init);
+
+static void __exit ehci_w90X900_cleanup(void)
+{
+ platform_driver_unregister(&ehci_hcd_w90x900_driver);
+}
+module_exit(ehci_w90X900_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_ALIAS("platform:w90p910-ehci");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
new file mode 100644
index 00000000000..fe57710753e
--- /dev/null
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -0,0 +1,241 @@
+/*
+ * EHCI HCD (Host Controller Driver) for USB.
+ *
+ * Bus Glue for Xilinx EHCI core on the of_platform bus
+ *
+ * Copyright (c) 2009 Xilinx, Inc.
+ *
+ * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com>
+ * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
+ * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/signal.h>
+
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+/**
+ * ehci_xilinx_port_handed_over - hand the port out if failed to enable it
+ * @hcd: Pointer to the usb_hcd device to which the host controller is bound
+ * @portnum: Port number to which the device is attached.
+ *
+ * This function is used as a place to tell the user that the Xilinx USB host
+ * controller does not support LS devices. And in an HS only configuration, it
+ * does not support FS devices either. It is hoped that this can help a
+ * confused user.
+ *
+ * There are cases when the host controller fails to enable the port due to,
+ * for example, insufficient power that can be supplied to the device from
+ * the USB bus. In those cases, the messages printed here are not helpful.
+ */
+static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
+{
+ dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum);
+ if (hcd->has_tt) {
+ dev_warn(hcd->self.controller,
+ "Maybe you have connected a low speed device?\n");
+
+ dev_warn(hcd->self.controller,
+ "We do not support low speed devices\n");
+ } else {
+ dev_warn(hcd->self.controller,
+ "Maybe your device is not a high speed device?\n");
+ dev_warn(hcd->self.controller,
+ "The USB host controller does not support full speed "
+ "nor low speed devices\n");
+ dev_warn(hcd->self.controller,
+ "You can reconfigure the host controller to have "
+ "full speed support\n");
+ }
+
+ return 0;
+}
+
+
+static const struct hc_driver ehci_xilinx_of_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "OF EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+#endif
+ .relinquish_port = NULL,
+ .port_handed_over = ehci_xilinx_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+/**
+ * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
+ * @op: pointer to the platform_device bound to the host controller
+ *
+ * This function requests resources and sets up appropriate properties for the
+ * host controller. Because the Xilinx USB host controller can be configured
+ * as HS only or HS/FS only, it checks the configuration in the device tree
+ * entry, and sets an appropriate value for hcd->has_tt.
+ */
+static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct resource res;
+ int irq;
+ int rv;
+ int *value;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n");
+
+ rv = of_address_to_resource(dn, 0, &res);
+ if (rv)
+ return rv;
+
+ hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev,
+ "XILINX-OF USB");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res.start;
+ hcd->rsrc_len = resource_size(&res);
+
+ irq = irq_of_parse_and_map(dn, 0);
+ if (!irq) {
+ dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+ __FILE__);
+ rv = -EBUSY;
+ goto err_irq;
+ }
+
+ hcd->regs = devm_ioremap_resource(&op->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ rv = PTR_ERR(hcd->regs);
+ goto err_irq;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+
+	/* This core always has a big-endian register interface and uses
+ * big-endian memory descriptors.
+ */
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+
+ /* Check whether the FS support option is selected in the hardware.
+ */
+ value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL);
+ if (value && (*value == 1)) {
+ ehci_dbg(ehci, "USB host controller supports FS devices\n");
+ hcd->has_tt = 1;
+ } else {
+ ehci_dbg(ehci,
+ "USB host controller is HS only\n");
+ hcd->has_tt = 0;
+ }
+
+ /* Debug registers are at the first 0x100 region
+ */
+ ehci->caps = hcd->regs + 0x100;
+
+ rv = usb_add_hcd(hcd, irq, 0);
+ if (rv == 0) {
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+ }
+
+err_irq:
+ usb_put_hcd(hcd);
+
+ return rv;
+}
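
The probe above decides hcd->has_tt from the "xlnx,support-usb-fs" device-tree property: only when the property exists and equals 1 does the controller get treated as having FS support behind a transaction translator. A tiny sketch of that decision, with a hypothetical property-lookup helper standing in for the OF API:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for an of_get_property()-style lookup. */
struct dt_prop { const char *name; int value; };

static const struct dt_prop props[] = {
	{ "xlnx,support-usb-fs", 1 },
};

static const int *get_prop(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(props) / sizeof(props[0]); i++)
		if (!strcmp(props[i].name, name))
			return &props[i].value;
	return NULL;			/* property absent */
}

int main(void)
{
	const int *value = get_prop("xlnx,support-usb-fs");
	int has_tt = (value && *value == 1);

	printf("has_tt = %d (%s)\n", has_tt,
	       has_tt ? "HS/FS with TT" : "HS only");
	return 0;
}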
+
+/**
+ * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
+ * @op: pointer to platform_device structure that is to be removed
+ *
+ * Remove the hcd structure, and release resources that have been requested
+ * during probe.
+ */
+static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(op);
+
+ dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");
+
+ usb_remove_hcd(hcd);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
+ {.compatible = "xlnx,xps-usb-host-1.00.a",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
+
+static struct platform_driver ehci_hcd_xilinx_of_driver = {
+ .probe = ehci_hcd_xilinx_of_probe,
+ .remove = ehci_hcd_xilinx_of_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "xilinx-of-ehci",
+ .owner = THIS_MODULE,
+ .of_match_table = ehci_hcd_xilinx_of_match,
+ },
+};
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index fb7054ccf4f..eee228a26a0 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -37,12 +37,16 @@ typedef __u16 __bitwise __hc16;
#define __hc16 __le16
#endif
-/* statistics can be kept for for tuning/monitoring */
+/* statistics can be kept for tuning/monitoring */
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define EHCI_STATS
+#endif
+
struct ehci_stats {
/* irq usage */
unsigned long normal;
unsigned long error;
- unsigned long reclaim;
+ unsigned long iaa;
unsigned long lost_iaa;
/* termination of urbs from core */
@@ -50,8 +54,30 @@ struct ehci_stats {
unsigned long unlink;
};
+/*
+ * Scheduling and budgeting information for periodic transfers, for both
+ * high-speed devices and full/low-speed devices lying behind a TT.
+ */
+struct ehci_per_sched {
+ struct usb_device *udev; /* access to the TT */
+ struct usb_host_endpoint *ep;
+ struct list_head ps_list; /* node on ehci_tt's ps_list */
+ u16 tt_usecs; /* time on the FS/LS bus */
+ u16 cs_mask; /* C-mask and S-mask bytes */
+ u16 period; /* actual period in frames */
+ u16 phase; /* actual phase, frame part */
+ u8 bw_phase; /* same, for bandwidth
+ reservation */
+ u8 phase_uf; /* uframe part of the phase */
+ u8 usecs, c_usecs; /* times on the HS bus */
+ u8 bw_uperiod; /* period in microframes, for
+ bandwidth reservation */
+ u8 bw_period; /* same, in frames */
+};
+#define NO_FRAME 29999 /* frame not assigned yet */
+
/* ehci_hcd->lock guards shared data against other CPUs:
- * ehci_hcd: async, reclaim, periodic (and shadow), ...
+ * ehci_hcd: async, unlink, periodic (and shadow), ...
* usb_host_endpoint: hcpriv
* ehci_qh: qh_next, qtd_list
* ehci_qtd: qtd_list
@@ -62,7 +88,49 @@ struct ehci_stats {
#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */
+/*
+ * ehci_rh_state values of EHCI_RH_RUNNING or above mean that the
+ * controller may be doing DMA. Lower values mean there's no DMA.
+ */
+enum ehci_rh_state {
+ EHCI_RH_HALTED,
+ EHCI_RH_SUSPENDED,
+ EHCI_RH_RUNNING,
+ EHCI_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * ehci-timer.c) in parallel with this list.
+ */
+enum ehci_hrtimer_event {
+ EHCI_HRTIMER_POLL_ASS, /* Poll for async schedule off */
+ EHCI_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
+ EHCI_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
+ EHCI_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
+ EHCI_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ EHCI_HRTIMER_START_UNLINK_INTR, /* Unlink empty interrupt QHs */
+ EHCI_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
+ EHCI_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
+ EHCI_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
+ EHCI_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
+ EHCI_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
+ EHCI_HRTIMER_NUM_EVENTS /* Must come last */
+};
+#define EHCI_HRTIMER_NO_EVENT 99
+
struct ehci_hcd { /* one per controller */
+ /* timing support */
+ enum ehci_hrtimer_event next_hrtimer_event;
+ unsigned enabled_hrtimer_events;
+ ktime_t hr_timeouts[EHCI_HRTIMER_NUM_EVENTS];
+ struct hrtimer hrtimer;
+
+ int PSS_poll_count;
+ int ASS_poll_count;
+ int died_poll_count;
+
/* glue to PCI and HCD framework */
struct ehci_caps __iomem *caps;
struct ehci_regs __iomem *regs;
@@ -70,22 +138,51 @@ struct ehci_hcd { /* one per controller */
__u32 hcs_params; /* cached register copy */
spinlock_t lock;
+ enum ehci_rh_state rh_state;
+
+ /* general schedule support */
+ bool scanning:1;
+ bool need_rescan:1;
+ bool intr_unlinking:1;
+ bool iaa_in_progress:1;
+ bool async_unlinking:1;
+ bool shutdown:1;
+ struct ehci_qh *qh_scan_next;
/* async schedule support */
struct ehci_qh *async;
- struct ehci_qh *reclaim;
- unsigned scanning : 1;
+ struct ehci_qh *dummy; /* For AMD quirk use */
+ struct list_head async_unlink;
+ struct list_head async_idle;
+ unsigned async_unlink_cycle;
+ unsigned async_count; /* async activity count */
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
unsigned periodic_size;
__hc32 *periodic; /* hw periodic table */
dma_addr_t periodic_dma;
+ struct list_head intr_qh_list;
unsigned i_thresh; /* uframes HC might cache */
union ehci_shadow *pshadow; /* mirror hw periodic table */
- int next_uframe; /* scan periodic, start here */
- unsigned periodic_sched; /* periodic activity count */
+ struct list_head intr_unlink_wait;
+ struct list_head intr_unlink;
+ unsigned intr_unlink_wait_cycle;
+ unsigned intr_unlink_cycle;
+ unsigned now_frame; /* frame from HC hardware */
+ unsigned last_iso_frame; /* last frame scanned for iso */
+ unsigned intr_count; /* intr activity count */
+ unsigned isoc_count; /* isoc activity count */
+ unsigned periodic_count; /* periodic activity count */
+ unsigned uframe_periodic_max; /* max periodic time per uframe */
+
+
+ /* list of itds & sitds completed while now_frame was still active */
+ struct list_head cached_itd_list;
+ struct ehci_itd *last_itd_to_free;
+ struct list_head cached_sitd_list;
+ struct ehci_sitd *last_sitd_to_free;
/* per root hub port */
unsigned long reset_done [EHCI_MAX_ROOT_PORTS];
@@ -101,6 +198,8 @@ struct ehci_hcd { /* one per controller */
the change-suspend feature turned on */
unsigned long suspended_ports; /* which ports are
suspended */
+ unsigned long resuming_ports; /* which ports have
+ started to resume */
/* per-HC memory pools (could be per-bus, but ...) */
struct dma_pool *qh_pool; /* qh per active urb */
@@ -108,11 +207,9 @@ struct ehci_hcd { /* one per controller */
struct dma_pool *itd_pool; /* itd per iso urb */
struct dma_pool *sitd_pool; /* sitd per split iso urb */
- struct timer_list iaa_watchdog;
- struct timer_list watchdog;
- unsigned long actions;
- unsigned stamp;
+ unsigned random_frame;
unsigned long next_statechange;
+ ktime_t last_periodic_enable;
u32 command;
/* SILICON QUIRKS */
@@ -120,7 +217,15 @@ struct ehci_hcd { /* one per controller */
unsigned has_fsl_port_bug:1; /* FreeScale */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
+ unsigned big_endian_capbase:1;
unsigned has_amcc_usb23:1;
+ unsigned need_io_watchdog:1;
+ unsigned amd_pll_fix:1;
+ unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
+ unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
+ unsigned need_oc_pp_cycle:1; /* MPC834X port power */
+ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@@ -130,7 +235,9 @@ struct ehci_hcd { /* one per controller */
#define OHCI_HCCTRL_OFFSET 0x4
#define OHCI_HCCTRL_LEN 0x4
__hc32 *ohci_hcctrl_reg;
-
+ unsigned has_hostpc:1;
+ unsigned has_tdi_phy_lpm:1;
+ unsigned has_ppcd:1; /* support per-port change bits */
u8 sbrn; /* packed release number */
/* irq statistics */
@@ -142,12 +249,21 @@ struct ehci_hcd { /* one per controller */
#endif
/* debug files */
-#ifdef DEBUG
+#ifdef CONFIG_DYNAMIC_DEBUG
struct dentry *debug_dir;
- struct dentry *debug_async;
- struct dentry *debug_periodic;
- struct dentry *debug_registers;
#endif
+
+ /* bandwidth usage */
+#define EHCI_BANDWIDTH_SIZE 64
+#define EHCI_BANDWIDTH_FRAMES (EHCI_BANDWIDTH_SIZE >> 3)
+ u8 bandwidth[EHCI_BANDWIDTH_SIZE];
+ /* us allocated per uframe */
+ u8 tt_budget[EHCI_BANDWIDTH_SIZE];
+ /* us budgeted per uframe */
+ struct list_head tt_list;
+
+ /* platform-specific data -- must come last */
+ unsigned long priv[0] __aligned(sizeof(s64));
};
/* convert between an HCD pointer and the corresponding EHCI_HCD */
@@ -160,66 +276,6 @@ static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci)
return container_of ((void *) ehci, struct usb_hcd, hcd_priv);
}
-
-static inline void
-iaa_watchdog_start(struct ehci_hcd *ehci)
-{
- WARN_ON(timer_pending(&ehci->iaa_watchdog));
- mod_timer(&ehci->iaa_watchdog,
- jiffies + msecs_to_jiffies(EHCI_IAA_MSECS));
-}
-
-static inline void iaa_watchdog_done(struct ehci_hcd *ehci)
-{
- del_timer(&ehci->iaa_watchdog);
-}
-
-enum ehci_timer_action {
- TIMER_IO_WATCHDOG,
- TIMER_ASYNC_SHRINK,
- TIMER_ASYNC_OFF,
-};
-
-static inline void
-timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
-{
- clear_bit (action, &ehci->actions);
-}
-
-static inline void
-timer_action (struct ehci_hcd *ehci, enum ehci_timer_action action)
-{
- /* Don't override timeouts which shrink or (later) disable
- * the async ring; just the I/O watchdog. Note that if a
- * SHRINK were pending, OFF would never be requested.
- */
- if (timer_pending(&ehci->watchdog)
- && ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
- & ehci->actions))
- return;
-
- if (!test_and_set_bit (action, &ehci->actions)) {
- unsigned long t;
-
- switch (action) {
- case TIMER_IO_WATCHDOG:
- t = EHCI_IO_JIFFIES;
- break;
- case TIMER_ASYNC_OFF:
- t = EHCI_ASYNC_JIFFIES;
- break;
- // case TIMER_ASYNC_SHRINK:
- default:
- /* add a jiffie since we synch against the
- * 8 KHz uframe counter.
- */
- t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
- break;
- }
- mod_timer(&ehci->watchdog, t + jiffies);
- }
-}
-
/*-------------------------------------------------------------------------*/
#include <linux/usb/ehci_def.h>
@@ -281,7 +337,7 @@ struct ehci_qtd {
/*
* Now the following defines are not converted using the
- * __constant_cpu_to_le32() macro anymore, since we have to support
+ * cpu_to_le32() macro anymore, since we have to support
* "dynamic" switching between be and le support, so that the driver
* can be used on one system with SoC EHCI controller using big-endian
* descriptors as well as a normal little-endian PCI EHCI controller.
@@ -325,11 +381,17 @@ union ehci_shadow {
* These appear in both the async and (for interrupt) periodic schedules.
*/
-struct ehci_qh {
- /* first part defined by EHCI spec */
+/* first part defined by EHCI spec */
+struct ehci_qh_hw {
__hc32 hw_next; /* see EHCI 3.6.1 */
__hc32 hw_info1; /* see EHCI 3.6.2 */
-#define QH_HEAD 0x00008000
+#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
+#define QH_HEAD (1 << 15) /* Head of async reclamation list */
+#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
+#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
+#define QH_LOW_SPEED (1 << 12)
+#define QH_FULL_SPEED (0 << 12)
+#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
__hc32 hw_info2; /* see EHCI 3.6.2 */
#define QH_SMASK 0x000000ff
#define QH_CMASK 0x0000ff00
@@ -344,42 +406,39 @@ struct ehci_qh {
__hc32 hw_token;
__hc32 hw_buf [5];
__hc32 hw_buf_hi [5];
+} __attribute__ ((aligned(32)));
+struct ehci_qh {
+ struct ehci_qh_hw *hw; /* Must come first */
/* the rest is HCD-private */
dma_addr_t qh_dma; /* address of qh */
union ehci_shadow qh_next; /* ptr to qh; or periodic */
struct list_head qtd_list; /* sw qtd list */
+ struct list_head intr_node; /* list of intr QHs */
struct ehci_qtd *dummy;
- struct ehci_qh *reclaim; /* next to reclaim */
+ struct list_head unlink_node;
+ struct ehci_per_sched ps; /* scheduling info */
- struct ehci_hcd *ehci;
-
- /*
- * Do NOT use atomic operations for QH refcounting. On some CPUs
- * (PPC7448 for example), atomic operations cannot be performed on
- * memory that is cache-inhibited (i.e. being used for DMA).
- * Spinlocks are used to protect all QH fields.
- */
- u32 refcount;
- unsigned stamp;
+ unsigned unlink_cycle;
u8 qh_state;
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
#define QH_STATE_IDLE 3 /* HC doesn't see this */
-#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
- /* periodic schedule info */
- u8 usecs; /* intr bandwidth */
+ u8 xacterrs; /* XactErr retry counter */
+#define QH_XACTERR_MAX 32 /* XactErr retry limit */
+
u8 gap_uf; /* uframes split/csplit gap */
- u8 c_usecs; /* ... split completion bw */
- u16 tt_usecs; /* tt downstream bandwidth */
- unsigned short period; /* polling interval */
- unsigned short start; /* where polling starts */
-#define NO_FRAME ((unsigned short)~0) /* pick new start */
- struct usb_device *dev; /* access to TT */
-} __attribute__ ((aligned (32)));
+
+ unsigned is_out:1; /* bulk or intr OUT */
+ unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
+ unsigned dequeue_during_giveback:1;
+ unsigned exception:1; /* got a fault, or an unlink
+ was requested */
+};
/*-------------------------------------------------------------------------*/
@@ -400,6 +459,7 @@ struct ehci_iso_packet {
struct ehci_iso_sched {
struct list_head td_list;
unsigned span;
+ unsigned first_packet;
struct ehci_iso_packet packet [0];
};
@@ -408,34 +468,24 @@ struct ehci_iso_sched {
* acts like a qh would, if EHCI had them for ISO.
*/
struct ehci_iso_stream {
- /* first two fields match QH, but info1 == 0 */
- __hc32 hw_next;
- __hc32 hw_info1;
+	/* first field matches ehci_qh, but is NULL */
+ struct ehci_qh_hw *hw;
- u32 refcount;
u8 bEndpointAddress;
u8 highspeed;
- u16 depth; /* depth in uframes */
struct list_head td_list; /* queued itds/sitds */
struct list_head free_list; /* list of unused itds/sitds */
- struct usb_device *udev;
- struct usb_host_endpoint *ep;
/* output of (re)scheduling */
- unsigned long start; /* jiffies */
- unsigned long rescheduled;
- int next_uframe;
+ struct ehci_per_sched ps; /* scheduling info */
+ unsigned next_uframe;
__hc32 splits;
/* the rest is derived from the endpoint descriptor,
- * trusting urb->interval == f(epdesc->bInterval) and
* including the extra info for hw_bufp[0..2]
*/
- u8 usecs, c_usecs;
- u16 interval;
- u16 tt_usecs;
+ u16 uperiod; /* period in uframes */
u16 maxp;
- u16 raw_mask;
unsigned bandwidth;
/* This is used to initialize iTD's hw_bufp fields */
@@ -550,6 +600,45 @@ struct ehci_fstn {
/*-------------------------------------------------------------------------*/
+/*
+ * USB-2.0 Specification Sections 11.14 and 11.18
+ * Scheduling and budgeting split transactions using TTs
+ *
+ * A hub can have a single TT for all its ports, or multiple TTs (one for each
+ * port). The bandwidth and budgeting information for the full/low-speed bus
+ * below each TT is self-contained and independent of the other TTs or the
+ * high-speed bus.
+ *
+ * "Bandwidth" refers to the number of microseconds on the FS/LS bus allocated
+ * to an interrupt or isochronous endpoint for each frame. "Budget" refers to
+ * the best-case estimate of the number of full-speed bytes allocated to an
+ * endpoint for each microframe within an allocated frame.
+ *
+ * Removal of an endpoint invalidates a TT's budget. Instead of trying to
+ * keep an up-to-date record, we recompute the budget when it is needed.
+ */
+
+struct ehci_tt {
+ u16 bandwidth[EHCI_BANDWIDTH_FRAMES];
+
+ struct list_head tt_list; /* List of all ehci_tt's */
+ struct list_head ps_list; /* Items using this TT */
+ struct usb_tt *usb_tt;
+ int tt_port; /* TT port number */
+};
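
The comment above distinguishes per-frame FS/LS bandwidth (microseconds) from the per-microframe byte budget, and notes that the bandwidth table is the authoritative record while the budget is recomputed on demand. A toy sketch of the reservation side only, assuming a hypothetical per-frame limit of 900 us and a small fixed table, could look like this:

#include <stdio.h>
#include <stdint.h>

#define BW_FRAMES     8		/* frames tracked per TT (EHCI_BANDWIDTH_FRAMES is 8) */
#define MAX_FRAME_US  900	/* hypothetical per-frame FS/LS limit */

/* Microseconds already reserved in each frame of the TT's schedule. */
static uint16_t bandwidth[BW_FRAMES];

/* Try to reserve 'usecs' in every 'period'-th frame starting at 'phase'. */
static int reserve(unsigned phase, unsigned period, unsigned usecs)
{
	unsigned f;

	for (f = phase; f < BW_FRAMES; f += period)
		if (bandwidth[f] + usecs > MAX_FRAME_US)
			return -1;	/* would overcommit this frame */
	for (f = phase; f < BW_FRAMES; f += period)
		bandwidth[f] += usecs;
	return 0;
}

static void release(unsigned phase, unsigned period, unsigned usecs)
{
	unsigned f;

	for (f = phase; f < BW_FRAMES; f += period)
		bandwidth[f] -= usecs;
}

int main(void)
{
	if (reserve(1, 4, 200) == 0)	/* an endpoint every 4th frame */
		printf("reserved; frame 1 now carries %u us\n", bandwidth[1]);
	release(1, 4, 200);
	return 0;
}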
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup) \
+ ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup);
+
+#define ehci_prepare_ports_for_controller_resume(ehci) \
+ ehci_adjust_port_wakeup_flags(ehci, false, false);
+
+/*-------------------------------------------------------------------------*/
+
#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT
/*
@@ -566,24 +655,24 @@ static inline unsigned int
ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
{
if (ehci_is_TDI(ehci)) {
- switch ((portsc>>26)&3) {
+ switch ((portsc >> (ehci->has_hostpc ? 25 : 26)) & 3) {
case 0:
return 0;
case 1:
- return (1<<USB_PORT_FEAT_LOWSPEED);
+ return USB_PORT_STAT_LOW_SPEED;
case 2:
default:
- return (1<<USB_PORT_FEAT_HIGHSPEED);
+ return USB_PORT_STAT_HIGH_SPEED;
}
}
- return (1<<USB_PORT_FEAT_HIGHSPEED);
+ return USB_PORT_STAT_HIGH_SPEED;
}
#else
#define ehci_is_TDI(e) (0)
-#define ehci_port_speed(ehci, portsc) (1<<USB_PORT_FEAT_HIGHSPEED)
+#define ehci_port_speed(ehci, portsc) USB_PORT_STAT_HIGH_SPEED
#endif
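
The TDI/HOSTPC variant above reads the device speed from a two-bit field whose position in PORTSC depends on whether the controller has the HOSTPC extension (bit 25 rather than bit 26). A small stand-alone sketch of that decode, with constants that mirror the USB_PORT_STAT_* values:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PORT_STAT_LOW_SPEED   0x0200	/* values mirror USB_PORT_STAT_* */
#define PORT_STAT_HIGH_SPEED  0x0400

/* Decode the 2-bit speed field; its position depends on the HOSTPC quirk. */
static unsigned port_speed(uint32_t portsc, bool has_hostpc)
{
	switch ((portsc >> (has_hostpc ? 25 : 26)) & 3) {
	case 0:
		return 0;			/* full speed */
	case 1:
		return PORT_STAT_LOW_SPEED;
	case 2:
	default:
		return PORT_STAT_HIGH_SPEED;
	}
}

int main(void)
{
	uint32_t portsc = 2u << 26;		/* high speed, no HOSTPC */

	printf("speed flags: 0x%x\n", port_speed(portsc, false));
	return 0;
}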
/*-------------------------------------------------------------------------*/
@@ -605,12 +694,18 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
* This attempts to support either format at compile time without a
* runtime penalty, or both formats with the additional overhead
* of checking a flag bit.
+ *
+ * ehci_big_endian_capbase is a special quirk for controllers that
+ * implement the HC capability registers as separate registers and not
+ * as fields of a 32-bit register.
*/
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
#define ehci_big_endian_mmio(e) ((e)->big_endian_mmio)
+#define ehci_big_endian_capbase(e) ((e)->big_endian_capbase)
#else
#define ehci_big_endian_mmio(e) 0
+#define ehci_big_endian_capbase(e) 0
#endif
/*
@@ -634,6 +729,18 @@ static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
#endif
}
+#ifdef CONFIG_SOC_IMX28
+static inline void imx28_ehci_writel(const unsigned int val,
+ volatile __u32 __iomem *addr)
+{
+ __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr));
+}
+#else
+static inline void imx28_ehci_writel(const unsigned int val,
+ volatile __u32 __iomem *addr)
+{
+}
+#endif
static inline void ehci_writel(const struct ehci_hcd *ehci,
const unsigned int val, __u32 __iomem *regs)
{
@@ -642,14 +749,17 @@ static inline void ehci_writel(const struct ehci_hcd *ehci,
writel_be(val, regs) :
writel(val, regs);
#else
- writel(val, regs);
+ if (ehci->imx28_write_fix)
+ imx28_ehci_writel(val, regs);
+ else
+ writel(val, regs);
#endif
}
/*
* On certain ppc-44x SoC there is a HW issue, that could only worked around with
* explicit suspend/operate of OHCI. This function hereby makes sense only on that arch.
- * Other common bits are dependant on has_amcc_usb23 quirk flag.
+ * Other common bits are dependent on has_amcc_usb23 quirk flag.
*/
#ifdef CONFIG_44x
static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
@@ -728,10 +838,41 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
/*-------------------------------------------------------------------------*/
-#ifndef DEBUG
+#define ehci_dbg(ehci, fmt, args...) \
+ dev_dbg(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+#define ehci_err(ehci, fmt, args...) \
+ dev_err(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+#define ehci_info(ehci, fmt, args...) \
+ dev_info(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+#define ehci_warn(ehci, fmt, args...) \
+ dev_warn(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+
+
+#ifndef CONFIG_DYNAMIC_DEBUG
#define STUB_DEBUG_FILES
-#endif /* DEBUG */
+#endif
/*-------------------------------------------------------------------------*/
+/* Declarations of things exported for use by ehci platform drivers */
+
+struct ehci_driver_overrides {
+ size_t extra_priv_size;
+ int (*reset)(struct usb_hcd *hcd);
+};
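
The extra_priv_size override works together with the priv[0] member at the end of struct ehci_hcd above: the core allocates the base structure plus whatever extra space the platform glue asked for, and the glue then treats priv as its private area. A small stand-alone sketch of that allocation pattern, with invented structure names and a C99 flexible array in place of priv[0]:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Core structure; platform-private data must come last. */
struct core_hcd {
	int state;
	unsigned long priv[];		/* flexible array member */
};

/* Hypothetical per-platform private data kept in priv[]. */
struct plat_priv {
	int clk_enabled;
	char name[16];
};

static struct core_hcd *create_hcd(size_t extra_priv_size)
{
	/* One allocation covers the core struct and the private area. */
	return calloc(1, sizeof(struct core_hcd) + extra_priv_size);
}

int main(void)
{
	struct core_hcd *hcd = create_hcd(sizeof(struct plat_priv));
	struct plat_priv *priv;

	if (!hcd)
		return 1;
	priv = (struct plat_priv *)hcd->priv;
	priv->clk_enabled = 1;
	strcpy(priv->name, "example");
	printf("%s: clk=%d\n", priv->name, priv->clk_enabled);
	free(hcd);
	return 0;
}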
+
+extern void ehci_init_driver(struct hc_driver *drv,
+ const struct ehci_driver_overrides *over);
+extern int ehci_setup(struct usb_hcd *hcd);
+extern int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
+ u32 mask, u32 done, int usec);
+
+#ifdef CONFIG_PM
+extern int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup);
+extern int ehci_resume(struct usb_hcd *hcd, bool hibernated);
+#endif /* CONFIG_PM */
+
+extern int ehci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength);
+
#endif /* __LINUX_EHCI_HCD_H */
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index 34e14edf390..f238cb37305 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -20,7 +20,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er)
@@ -41,7 +41,7 @@ void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er)
static int fhci_dfs_regs_show(struct seq_file *s, void *v)
{
struct fhci_hcd *fhci = s->private;
- struct fhci_regs __iomem *regs = fhci->regs;
+ struct qe_usb_ctlr __iomem *regs = fhci->regs;
seq_printf(s,
"mode: 0x%x\n" "addr: 0x%x\n"
@@ -50,11 +50,11 @@ static int fhci_dfs_regs_show(struct seq_file *s, void *v)
"status: 0x%x\n" "SOF timer: %d\n"
"frame number: %d\n"
"lines status: 0x%x\n",
- in_8(&regs->usb_mod), in_8(&regs->usb_addr),
- in_8(&regs->usb_comm), in_be16(&regs->usb_ep[0]),
- in_be16(&regs->usb_event), in_be16(&regs->usb_mask),
- in_8(&regs->usb_status), in_be16(&regs->usb_sof_tmr),
- in_be16(&regs->usb_frame_num),
+ in_8(&regs->usb_usmod), in_8(&regs->usb_usadr),
+ in_8(&regs->usb_uscom), in_be16(&regs->usb_usep[0]),
+ in_be16(&regs->usb_usber), in_be16(&regs->usb_usbmr),
+ in_8(&regs->usb_usbs), in_be16(&regs->usb_ussft),
+ in_be16(&regs->usb_usfrn),
fhci_ioports_check_bus_state(fhci));
return 0;
@@ -108,7 +108,7 @@ void fhci_dfs_create(struct fhci_hcd *fhci)
{
struct device *dev = fhci_to_hcd(fhci)->self.controller;
- fhci->dfs_root = debugfs_create_dir(dev->bus_id, NULL);
+ fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
if (!fhci->dfs_root) {
WARN_ON(1);
return;
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index ba622cc8a9b..1cf68eaf2ed 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -25,11 +25,14 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/slab.h>
#include <asm/qe.h>
#include <asm/fsl_gtm.h>
-#include "../core/hcd.h"
#include "fhci.h"
void fhci_start_sof_timer(struct fhci_hcd *fhci)
@@ -39,8 +42,8 @@ void fhci_start_sof_timer(struct fhci_hcd *fhci)
/* clear frame_n */
out_be16(&fhci->pram->frame_num, 0);
- out_be16(&fhci->regs->usb_sof_tmr, 0);
- setbits8(&fhci->regs->usb_mod, USB_MODE_SFTE);
+ out_be16(&fhci->regs->usb_ussft, 0);
+ setbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);
fhci_dbg(fhci, "<- %s\n", __func__);
}
@@ -49,7 +52,7 @@ void fhci_stop_sof_timer(struct fhci_hcd *fhci)
{
fhci_dbg(fhci, "-> %s\n", __func__);
- clrbits8(&fhci->regs->usb_mod, USB_MODE_SFTE);
+ clrbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);
gtm_stop_timer16(fhci->timer);
fhci_dbg(fhci, "<- %s\n", __func__);
@@ -57,7 +60,7 @@ void fhci_stop_sof_timer(struct fhci_hcd *fhci)
u16 fhci_get_sof_timer_count(struct fhci_usb *usb)
{
- return be16_to_cpu(in_be16(&usb->fhci->regs->usb_sof_tmr) / 12);
+ return be16_to_cpu(in_be16(&usb->fhci->regs->usb_ussft) / 12);
}
/* initialize the endpoint zero */
@@ -87,8 +90,8 @@ void fhci_usb_enable_interrupt(struct fhci_usb *usb)
enable_irq(fhci_to_hcd(fhci)->irq);
/* initialize the event register and mask register */
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
/* enable the timer interrupts */
enable_irq(fhci->timer->irq);
@@ -97,18 +100,18 @@ void fhci_usb_enable_interrupt(struct fhci_usb *usb)
usb->intr_nesting_cnt--;
}
-/* diable the usb interrupt */
+/* disable the usb interrupt */
void fhci_usb_disable_interrupt(struct fhci_usb *usb)
{
struct fhci_hcd *fhci = usb->fhci;
if (usb->intr_nesting_cnt == 0) {
- /* diable the timer interrupt */
+ /* disable the timer interrupt */
disable_irq_nosync(fhci->timer->irq);
/* disable the usb interrupt */
disable_irq_nosync(fhci_to_hcd(fhci)->irq);
- out_be16(&usb->fhci->regs->usb_mask, 0);
+ out_be16(&usb->fhci->regs->usb_usbmr, 0);
}
usb->intr_nesting_cnt++;
}
@@ -118,9 +121,9 @@ static u32 fhci_usb_enable(struct fhci_hcd *fhci)
{
struct fhci_usb *usb = fhci->usb_lld;
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
- setbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
+ setbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
mdelay(100);
@@ -140,7 +143,7 @@ static u32 fhci_usb_disable(struct fhci_hcd *fhci)
usb->port_status == FHCI_PORT_LOW)
fhci_device_disconnected_interrupt(fhci);
- clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);
+ clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
return 0;
}
@@ -242,9 +245,10 @@ err:
static void fhci_usb_free(void *lld)
{
struct fhci_usb *usb = lld;
- struct fhci_hcd *fhci = usb->fhci;
+ struct fhci_hcd *fhci;
if (usb) {
+ fhci = usb->fhci;
fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
fhci_ep0_free(usb);
kfree(usb->actual_frame);
@@ -283,13 +287,13 @@ static int fhci_usb_init(struct fhci_hcd *fhci)
USB_E_IDLE_MASK |
USB_E_RESET_MASK | USB_E_SFT_MASK | USB_E_MSF_MASK);
- out_8(&usb->fhci->regs->usb_mod, USB_MODE_HOST | USB_MODE_EN);
+ out_8(&usb->fhci->regs->usb_usmod, USB_MODE_HOST | USB_MODE_EN);
/* clearing the mask register */
- out_be16(&usb->fhci->regs->usb_mask, 0);
+ out_be16(&usb->fhci->regs->usb_usbmr, 0);
/* initialing the event register */
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
if (endpoint_zero_init(usb, DEFAULT_DATA_MEM, DEFAULT_RING_LEN) != 0) {
fhci_usb_free(usb);
@@ -399,7 +403,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
/* 1 td fro setup,1 for ack */
size = 2;
case PIPE_BULK:
- /* one td for every 4096 bytes(can be upto 8k) */
+ /* one td for every 4096 bytes(can be up to 8k) */
size += urb->transfer_buffer_length / 4096;
/* ...add for any remaining bytes... */
if ((urb->transfer_buffer_length % 4096) != 0)
@@ -432,7 +436,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return -ENOMEM;
/* allocate the private part of the URB */
- urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags);
+ urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
if (!urb_priv->tds) {
kfree(urb_priv);
return -ENOMEM;
@@ -559,11 +563,10 @@ static const struct hc_driver fhci_driver = {
.hub_control = fhci_hub_control,
};
-static int __devinit of_fhci_probe(struct of_device *ofdev,
- const struct of_device_id *ofid)
+static int of_fhci_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
- struct device_node *node = ofdev->node;
+ struct device_node *node = dev->of_node;
struct usb_hcd *hcd;
struct fhci_hcd *fhci;
struct resource usb_regs;
@@ -583,7 +586,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
if (sprop && strcmp(sprop, "host"))
return -ENODEV;
- hcd = usb_create_hcd(&fhci_driver, dev, dev->bus_id);
+ hcd = usb_create_hcd(&fhci_driver, dev, dev_name(dev));
if (!hcd) {
dev_err(dev, "could not create hcd\n");
return -ENOMEM;
@@ -604,7 +607,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
goto err_regs;
}
- hcd->regs = ioremap(usb_regs.start, usb_regs.end - usb_regs.start + 1);
+ hcd->regs = ioremap(usb_regs.start, resource_size(&usb_regs));
if (!hcd->regs) {
dev_err(dev, "could not ioremap regs\n");
ret = -ENOMEM;
@@ -620,12 +623,15 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
goto err_pram;
}
- pram_addr = cpm_muram_alloc_fixed(iprop[2], FHCI_PRAM_SIZE);
+ pram_addr = cpm_muram_alloc(FHCI_PRAM_SIZE, 64);
if (IS_ERR_VALUE(pram_addr)) {
dev_err(dev, "failed to allocate usb pram\n");
ret = -ENOMEM;
goto err_pram;
}
+
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, QE_CR_SUBBLOCK_USB,
+ QE_CR_PROTOCOL_UNSPECIFIED, pram_addr);
fhci->pram = cpm_muram_addr(pram_addr);
/* GPIOs and pins */
@@ -650,7 +656,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
}
}
- ret = gpio_request(gpio, dev->bus_id);
+ ret = gpio_request(gpio, dev_name(dev));
if (ret) {
dev_err(dev, "failed to request gpio %d", i);
goto err_gpios;
@@ -668,7 +674,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
}
for (j = 0; j < NUM_PINS; j++) {
- fhci->pins[j] = qe_pin_request(ofdev->node, j);
+ fhci->pins[j] = qe_pin_request(node, j);
if (IS_ERR(fhci->pins[j])) {
ret = PTR_ERR(fhci->pins[j]);
dev_err(dev, "can't get pin %d: %d\n", j, ret);
@@ -685,7 +691,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
}
ret = request_irq(fhci->timer->irq, fhci_frame_limit_timer_irq,
- IRQF_DISABLED, "qe timer (usb)", hcd);
+ 0, "qe timer (usb)", hcd);
if (ret) {
dev_err(dev, "failed to request timer irq");
goto err_timer_irq;
@@ -741,13 +747,15 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
}
/* Clear and disable any pending interrupts. */
- out_be16(&fhci->regs->usb_event, 0xffff);
- out_be16(&fhci->regs->usb_mask, 0);
+ out_be16(&fhci->regs->usb_usber, 0xffff);
+ out_be16(&fhci->regs->usb_usbmr, 0);
- ret = usb_add_hcd(hcd, usb_irq, IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, usb_irq, 0);
if (ret < 0)
goto err_add_hcd;
+ device_wakeup_enable(hcd->self.controller);
+
fhci_dfs_create(fhci);
return 0;
@@ -776,7 +784,7 @@ err_regs:
return ret;
}
-static int __devexit fhci_remove(struct device *dev)
+static int fhci_remove(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
@@ -799,35 +807,28 @@ static int __devexit fhci_remove(struct device *dev)
return 0;
}
-static int __devexit of_fhci_remove(struct of_device *ofdev)
+static int of_fhci_remove(struct platform_device *ofdev)
{
return fhci_remove(&ofdev->dev);
}
-static struct of_device_id of_fhci_match[] = {
+static const struct of_device_id of_fhci_match[] = {
{ .compatible = "fsl,mpc8323-qe-usb", },
{},
};
MODULE_DEVICE_TABLE(of, of_fhci_match);
-static struct of_platform_driver of_fhci_driver = {
- .name = "fsl,usb-fhci",
- .match_table = of_fhci_match,
+static struct platform_driver of_fhci_driver = {
+ .driver = {
+ .name = "fsl,usb-fhci",
+ .owner = THIS_MODULE,
+ .of_match_table = of_fhci_match,
+ },
.probe = of_fhci_probe,
- .remove = __devexit_p(of_fhci_remove),
+ .remove = of_fhci_remove,
};
-static int __init fhci_module_init(void)
-{
- return of_register_platform_driver(&of_fhci_driver);
-}
-module_init(fhci_module_init);
-
-static void __exit fhci_module_exit(void)
-{
- of_unregister_platform_driver(&of_fhci_driver);
-}
-module_exit(fhci_module_exit);
+module_platform_driver(of_fhci_driver);
MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
MODULE_AUTHOR("Shlomi Gridish <gridish@freescale.com>, "
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index 0cfaedc3e12..6af2512f837 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -22,9 +22,9 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/gpio.h>
#include <asm/qe.h>
-#include "../core/hcd.h"
#include "fhci.h"
/* virtual root hub specific descriptor */
@@ -97,7 +97,7 @@ void fhci_port_disable(struct fhci_hcd *fhci)
/* Enable IDLE since we want to know if something comes along */
usb->saved_msk |= USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
/* check if during the disconnection process attached new device */
if (port_status == FHCI_PORT_WAITING)
@@ -158,21 +158,21 @@ void fhci_port_reset(void *lld)
fhci_stop_sof_timer(fhci);
/* disable the USB controller */
- mode = in_8(&fhci->regs->usb_mod);
- out_8(&fhci->regs->usb_mod, mode & (~USB_MODE_EN));
+ mode = in_8(&fhci->regs->usb_usmod);
+ out_8(&fhci->regs->usb_usmod, mode & (~USB_MODE_EN));
/* disable idle interrupts */
- mask = in_be16(&fhci->regs->usb_mask);
- out_be16(&fhci->regs->usb_mask, mask & (~USB_E_IDLE_MASK));
+ mask = in_be16(&fhci->regs->usb_usbmr);
+ out_be16(&fhci->regs->usb_usbmr, mask & (~USB_E_IDLE_MASK));
fhci_io_port_generate_reset(fhci);
/* enable interrupt on this endpoint */
- out_be16(&fhci->regs->usb_mask, mask);
+ out_be16(&fhci->regs->usb_usbmr, mask);
/* enable the USB controller */
- mode = in_8(&fhci->regs->usb_mod);
- out_8(&fhci->regs->usb_mod, mode | USB_MODE_EN);
+ mode = in_8(&fhci->regs->usb_usmod);
+ out_8(&fhci->regs->usb_usmod, mode | USB_MODE_EN);
fhci_start_sof_timer(fhci);
fhci_dbg(fhci, "<- %s\n", __func__);
diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c
index 2c0736c9971..b0b88f57a5a 100644
--- a/drivers/usb/host/fhci-mem.c
+++ b/drivers/usb/host/fhci-mem.c
@@ -18,9 +18,10 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
static void init_td(struct td *td)
diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c
index b0a1446ba29..03be7494a47 100644
--- a/drivers/usb/host/fhci-q.c
+++ b/drivers/usb/host/fhci-q.c
@@ -19,9 +19,10 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
/* maps the hardware error code to the USB error code */
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index bb63b68ddb7..95ca5986e67 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -1,7 +1,7 @@
/*
* Freescale QUICC Engine USB Host Controller Driver
*
- * Copyright (c) Freescale Semicondutor, Inc. 2006.
+ * Copyright (c) Freescale Semicondutor, Inc. 2006, 2011.
* Shlomi Gridish <gridish@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
* Copyright (c) Logic Product Development, Inc. 2007
@@ -24,9 +24,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <asm/qe.h>
#include <asm/fsl_gtm.h>
-#include "../core/hcd.h"
#include "fhci.h"
static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
@@ -37,7 +37,7 @@ static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
pkt->info = 0;
pkt->priv_data = NULL;
- cq_put(usb->ep0->empty_frame_Q, pkt);
+ cq_put(&usb->ep0->empty_frame_Q, pkt);
}
/* confirm submitted packet */
@@ -57,7 +57,7 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
if ((td->data + td->actual_len) && trans_len)
memcpy(td->data + td->actual_len, pkt->data,
trans_len);
- cq_put(usb->ep0->dummy_packets_Q, pkt->data);
+ cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
}
recycle_frame(usb, pkt);
@@ -125,15 +125,15 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
/*
* Flush all transmitted packets from BDs
* This routine is called when disabling the USB port to flush all
- * transmissions that are allready scheduled in the BDs
+ * transmissions that are already scheduled in the BDs
*/
void fhci_flush_all_transmissions(struct fhci_usb *usb)
{
u8 mode;
struct td *td;
- mode = in_8(&usb->fhci->regs->usb_mod);
- clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_EN);
+ mode = in_8(&usb->fhci->regs->usb_usmod);
+ clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
fhci_flush_bds(usb);
@@ -147,9 +147,9 @@ void fhci_flush_all_transmissions(struct fhci_usb *usb)
usb->actual_frame->frame_status = FRAME_END_TRANSMISSION;
/* reset the event register */
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
/* enable the USB controller */
- out_8(&usb->fhci->regs->usb_mod, mode | USB_MODE_EN);
+ out_8(&usb->fhci->regs->usb_usmod, mode | USB_MODE_EN);
}
/*
@@ -213,7 +213,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
}
/* update frame object fields before transmitting */
- pkt = cq_get(usb->ep0->empty_frame_Q);
+ pkt = cq_get(&usb->ep0->empty_frame_Q);
if (!pkt) {
fhci_dbg(usb->fhci, "there is no empty frame\n");
return -1;
@@ -222,7 +222,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
pkt->info = 0;
if (data == NULL) {
- data = cq_get(usb->ep0->dummy_packets_Q);
+ data = cq_get(&usb->ep0->dummy_packets_Q);
BUG_ON(!data);
pkt->info = PKT_DUMMY_PACKET;
}
@@ -246,7 +246,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
list_del_init(&td->frame_lh);
td->status = USB_TD_OK;
if (pkt->info & PKT_DUMMY_PACKET)
- cq_put(usb->ep0->dummy_packets_Q, pkt->data);
+ cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
recycle_frame(usb, pkt);
usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD);
fhci_err(usb->fhci, "host transaction failed\n");
@@ -261,8 +261,7 @@ static void move_head_to_tail(struct list_head *list)
struct list_head *node = list->next;
if (!list_empty(list)) {
- list_del(node);
- list_add_tail(node, list);
+ list_move_tail(node, list);
}
}
@@ -414,7 +413,7 @@ static void sof_interrupt(struct fhci_hcd *fhci)
usb->port_status = FHCI_PORT_FULL;
/* Disable IDLE */
usb->saved_msk &= ~USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
}
gtm_set_exact_timer16(fhci->timer, usb->max_frame_usage, false);
@@ -433,14 +432,14 @@ void fhci_device_disconnected_interrupt(struct fhci_hcd *fhci)
fhci_dbg(fhci, "-> %s\n", __func__);
fhci_usb_disable_interrupt(usb);
- clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_LSS);
+ clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
usb->port_status = FHCI_PORT_DISABLED;
fhci_stop_sof_timer(fhci);
/* Enable IDLE since we want to know if something comes along */
usb->saved_msk |= USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_mask, usb->saved_msk);
+ out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
usb->vroot_hub->port.wPortStatus &= ~USB_PORT_STAT_CONNECTION;
usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_CONNECTION;
@@ -473,7 +472,7 @@ void fhci_device_connected_interrupt(struct fhci_hcd *fhci)
}
usb->port_status = FHCI_PORT_LOW;
- setbits8(&usb->fhci->regs->usb_mod, USB_MODE_LSS);
+ setbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
usb->vroot_hub->port.wPortStatus |=
(USB_PORT_STAT_LOW_SPEED |
USB_PORT_STAT_CONNECTION);
@@ -491,7 +490,7 @@ void fhci_device_connected_interrupt(struct fhci_hcd *fhci)
}
usb->port_status = FHCI_PORT_FULL;
- clrbits8(&usb->fhci->regs->usb_mod, USB_MODE_LSS);
+ clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
usb->vroot_hub->port.wPortStatus &=
~USB_PORT_STAT_LOW_SPEED;
usb->vroot_hub->port.wPortStatus |=
@@ -535,7 +534,7 @@ static void abort_transmission(struct fhci_usb *usb)
/* issue stop Tx command */
qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB, EP_ZERO, 0);
/* flush Tx FIFOs */
- out_8(&usb->fhci->regs->usb_comm, USB_CMD_FLUSH_FIFO | EP_ZERO);
+ out_8(&usb->fhci->regs->usb_uscom, USB_CMD_FLUSH_FIFO | EP_ZERO);
udelay(1000);
/* reset Tx BDs */
fhci_flush_bds(usb);
@@ -555,11 +554,11 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
usb = fhci->usb_lld;
- usb_er |= in_be16(&usb->fhci->regs->usb_event) &
- in_be16(&usb->fhci->regs->usb_mask);
+ usb_er |= in_be16(&usb->fhci->regs->usb_usber) &
+ in_be16(&usb->fhci->regs->usb_usbmr);
/* clear event bits for next time */
- out_be16(&usb->fhci->regs->usb_event, usb_er);
+ out_be16(&usb->fhci->regs->usb_usber, usb_er);
fhci_dbg_isr(fhci, usb_er);
@@ -573,12 +572,10 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
/* Turn on IDLE since we want to disconnect */
usb->saved_msk |= USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_event,
+ out_be16(&usb->fhci->regs->usb_usber,
usb->saved_msk);
} else if (usb->port_status == FHCI_PORT_DISABLED) {
- if (fhci_ioports_check_bus_state(fhci) == 1 &&
- usb->port_status != FHCI_PORT_LOW &&
- usb->port_status != FHCI_PORT_FULL)
+ if (fhci_ioports_check_bus_state(fhci) == 1)
fhci_device_connected_interrupt(fhci);
}
usb_er &= ~USB_E_RESET_MASK;
@@ -605,9 +602,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
}
if (usb_er & USB_E_IDLE_MASK) {
- if (usb->port_status == FHCI_PORT_DISABLED &&
- usb->port_status != FHCI_PORT_LOW &&
- usb->port_status != FHCI_PORT_FULL) {
+ if (usb->port_status == FHCI_PORT_DISABLED) {
usb_er &= ~USB_E_RESET_MASK;
fhci_device_connected_interrupt(fhci);
} else if (usb->port_status ==
@@ -615,7 +610,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
/* XXX usb->port_status = FHCI_PORT_WAITING; */
/* Disable IDLE */
usb->saved_msk &= ~USB_E_IDLE_MASK;
- out_be16(&usb->fhci->regs->usb_mask,
+ out_be16(&usb->fhci->regs->usb_usbmr,
usb->saved_msk);
} else {
fhci_dbg_isr(fhci, -1);
@@ -631,7 +626,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
/*
- * Process normal completions(error or sucess) and clean the schedule.
+ * Process normal completions(error or success) and clean the schedule.
*
* This is the main path for handing urbs back to drivers. The only other patth
* is process_del_list(),which unlinks URBs by scanning EDs,instead of scanning
@@ -744,9 +739,13 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
}
/* for ISO transfer calculate start frame index */
- if (ed->mode == FHCI_TF_ISO && urb->transfer_flags & URB_ISO_ASAP)
- urb->start_frame = ed->td_head ? ed->last_iso + 1 :
+ if (ed->mode == FHCI_TF_ISO) {
+ /* Ignore the possibility of underruns */
+ urb->start_frame = ed->td_head ? ed->next_iso :
get_frame_num(fhci);
+ ed->next_iso = (urb->start_frame + urb->interval *
+ urb->number_of_packets) & 0x07ff;
+ }
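
The hunk above computes the start frame for a newly queued isochronous URB: if the endpoint already has TDs queued it continues from the endpoint's next_iso frame, otherwise it starts at the current frame number, and next_iso is then advanced by interval * number_of_packets modulo the 11-bit frame counter. A minimal sketch of that wraparound arithmetic, with made-up values:

#include <stdio.h>

#define FRAME_MASK 0x07ff	/* USB frame numbers wrap at 2048 */

/* Advance an ISO endpoint's next start frame past a newly queued URB. */
static unsigned advance_iso(unsigned start_frame, unsigned interval,
			    unsigned number_of_packets)
{
	return (start_frame + interval * number_of_packets) & FRAME_MASK;
}

int main(void)
{
	unsigned start = 2040;	/* close to the wrap point */
	unsigned next = advance_iso(start, 1, 16);

	printf("start %u -> next_iso %u\n", start, next);	/* wraps to 8 */
	return 0;
}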
/*
* OHCI handles the DATA toggle itself,we just use the USB
@@ -814,9 +813,11 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
ed->dev_addr = usb_pipedevice(urb->pipe);
ed->max_pkt_size = usb_maxpacket(urb->dev, urb->pipe,
usb_pipeout(urb->pipe));
+ /* setup stage */
td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++, FHCI_TA_SETUP,
USB_TD_TOGGLE_DATA0, urb->setup_packet, 8, 0, 0, true);
+ /* data stage */
if (data_len > 0) {
td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
@@ -824,9 +825,18 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
USB_TD_TOGGLE_DATA1, data, data_len, 0, 0,
true);
}
- td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
- usb_pipeout(urb->pipe) ? FHCI_TA_IN : FHCI_TA_OUT,
- USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+
+ /* status stage */
+ if (data_len > 0)
+ td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+ (usb_pipeout(urb->pipe) ? FHCI_TA_IN :
+ FHCI_TA_OUT),
+ USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+ else
+ td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+ FHCI_TA_IN,
+ USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+
urb_state = US_CTRL_SETUP;
break;
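
The rewritten control-transfer path above queues a SETUP stage, an optional DATA stage, and a STATUS stage whose direction is IN when there is no data and opposite to the data direction otherwise. A compact sketch of just that direction rule, using invented enum names:

#include <stdio.h>
#include <stdbool.h>

enum dir { DIR_OUT, DIR_IN };

/* Direction of the status stage of a USB control transfer:
 * IN when there is no data stage, otherwise opposite to the data stage.
 */
static enum dir status_stage_dir(bool has_data, enum dir data_dir)
{
	if (!has_data)
		return DIR_IN;
	return data_dir == DIR_OUT ? DIR_IN : DIR_OUT;
}

int main(void)
{
	printf("no data       -> %s\n",
	       status_stage_dir(false, DIR_OUT) == DIR_IN ? "IN" : "OUT");
	printf("data stage IN -> %s\n",
	       status_stage_dir(true, DIR_IN) == DIR_IN ? "IN" : "OUT");
	return 0;
}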
case FHCI_TF_ISO:
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index b4033229031..1498061f0ae 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -18,10 +18,11 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
#define DUMMY_BD_BUFFER 0xdeadbeef
@@ -39,7 +40,7 @@
#define TD_RXER 0x0020 /* Rx error or not */
#define TD_NAK 0x0010 /* No ack. */
-#define TD_STAL 0x0008 /* Stall recieved */
+#define TD_STAL 0x0008 /* Stall received */
#define TD_TO 0x0004 /* time out */
#define TD_UN 0x0002 /* underrun */
#define TD_NO 0x0010 /* Rx Non Octet Aligned Packet */
@@ -105,34 +106,34 @@ void fhci_ep0_free(struct fhci_usb *usb)
if (ep->td_base)
cpm_muram_free(cpm_muram_offset(ep->td_base));
- if (ep->conf_frame_Q) {
- size = cq_howmany(ep->conf_frame_Q);
+ if (kfifo_initialized(&ep->conf_frame_Q)) {
+ size = cq_howmany(&ep->conf_frame_Q);
for (; size; size--) {
- struct packet *pkt = cq_get(ep->conf_frame_Q);
+ struct packet *pkt = cq_get(&ep->conf_frame_Q);
kfree(pkt);
}
- cq_delete(ep->conf_frame_Q);
+ cq_delete(&ep->conf_frame_Q);
}
- if (ep->empty_frame_Q) {
- size = cq_howmany(ep->empty_frame_Q);
+ if (kfifo_initialized(&ep->empty_frame_Q)) {
+ size = cq_howmany(&ep->empty_frame_Q);
for (; size; size--) {
- struct packet *pkt = cq_get(ep->empty_frame_Q);
+ struct packet *pkt = cq_get(&ep->empty_frame_Q);
kfree(pkt);
}
- cq_delete(ep->empty_frame_Q);
+ cq_delete(&ep->empty_frame_Q);
}
- if (ep->dummy_packets_Q) {
- size = cq_howmany(ep->dummy_packets_Q);
+ if (kfifo_initialized(&ep->dummy_packets_Q)) {
+ size = cq_howmany(&ep->dummy_packets_Q);
for (; size; size--) {
- u8 *buff = cq_get(ep->dummy_packets_Q);
+ u8 *buff = cq_get(&ep->dummy_packets_Q);
kfree(buff);
}
- cq_delete(ep->dummy_packets_Q);
+ cq_delete(&ep->dummy_packets_Q);
}
kfree(ep);
@@ -154,7 +155,7 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
struct endpoint *ep;
struct usb_td __iomem *td;
unsigned long ep_offset;
- char *err_for = "enpoint PRAM";
+ char *err_for = "endpoint PRAM";
int ep_mem_size;
u32 i;
@@ -175,10 +176,9 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
ep->td_base = cpm_muram_addr(ep_offset);
/* zero all queue pointers */
- ep->conf_frame_Q = cq_new(ring_len + 2);
- ep->empty_frame_Q = cq_new(ring_len + 2);
- ep->dummy_packets_Q = cq_new(ring_len + 2);
- if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) {
+ if (cq_new(&ep->conf_frame_Q, ring_len + 2) ||
+ cq_new(&ep->empty_frame_Q, ring_len + 2) ||
+ cq_new(&ep->dummy_packets_Q, ring_len + 2)) {
err_for = "frame_queues";
goto err;
}
@@ -199,8 +199,8 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
err_for = "buffer";
goto err;
}
- cq_put(ep->empty_frame_Q, pkt);
- cq_put(ep->dummy_packets_Q, buff);
+ cq_put(&ep->empty_frame_Q, pkt);
+ cq_put(&ep->dummy_packets_Q, buff);
}
/* we put the endpoint parameter RAM right behind the TD ring */
@@ -249,7 +249,7 @@ void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep,
u8 rt;
/* set the endpoint registers according to the endpoint */
- out_be16(&usb->fhci->regs->usb_ep[0],
+ out_be16(&usb->fhci->regs->usb_usep[0],
USB_TRANS_CTR | USB_EP_MF | USB_EP_RTE);
out_be16(&usb->fhci->pram->ep_ptr[0],
cpm_muram_offset(ep->ep_pram_ptr));
@@ -271,10 +271,10 @@ void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep,
/*
* Collect the submitted frames and inform the application about them
- * It is also prepearing the TDs for new frames. If the Tx interrupts
- * are diabled, the application should call that routine to get
+ * It is also preparing the TDs for new frames. If the Tx interrupts
+ * are disabled, the application should call that routine to get
* confirmation about the submitted frames. Otherwise, the routine is
- * called frome the interrupt service routine during the Tx interrupt.
+ * called from the interrupt service routine during the Tx interrupt.
* In that case the application is informed by calling the application
* specific 'fhci_transaction_confirm' routine
*/
@@ -319,7 +319,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb)
if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W))
continue;
- pkt = cq_get(ep->conf_frame_Q);
+ pkt = cq_get(&ep->conf_frame_Q);
if (!pkt)
fhci_err(usb->fhci, "no frame to confirm\n");
@@ -337,7 +337,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb)
pkt->status = USB_TD_RX_ER_NONOCT;
else
fhci_err(usb->fhci, "illegal error "
- "occured\n");
+ "occurred\n");
} else if (td_status & TD_NAK)
pkt->status = USB_TD_TX_ER_NAK;
else if (td_status & TD_TO)
@@ -347,7 +347,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb)
else if (td_status & TD_STAL)
pkt->status = USB_TD_TX_ER_STALL;
else
- fhci_err(usb->fhci, "illegal error occured\n");
+ fhci_err(usb->fhci, "illegal error occurred\n");
} else if ((extra_data & TD_TOK_IN) &&
pkt->len > td_length - CRC_SIZE) {
pkt->status = USB_TD_RX_DATA_UNDERUN;
@@ -460,10 +460,10 @@ u32 fhci_host_transaction(struct fhci_usb *usb,
out_be16(&td->length, pkt->len);
/* put the frame to the confirmation queue */
- cq_put(ep->conf_frame_Q, pkt);
+ cq_put(&ep->conf_frame_Q, pkt);
- if (cq_howmany(ep->conf_frame_Q) == 1)
- out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
+ if (cq_howmany(&ep->conf_frame_Q) == 1)
+ out_8(&usb->fhci->regs->usb_uscom, USB_CMD_STR_FIFO);
return 0;
}
@@ -535,8 +535,8 @@ void fhci_flush_actual_frame(struct fhci_usb *usb)
struct endpoint *ep = usb->ep0;
/* disable the USB controller */
- mode = in_8(&usb->fhci->regs->usb_mod);
- out_8(&usb->fhci->regs->usb_mod, mode & ~USB_MODE_EN);
+ mode = in_8(&usb->fhci->regs->usb_usmod);
+ out_8(&usb->fhci->regs->usb_usmod, mode & ~USB_MODE_EN);
tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr);
td = cpm_muram_addr(tb_ptr);
@@ -571,9 +571,9 @@ void fhci_flush_actual_frame(struct fhci_usb *usb)
usb->actual_frame->frame_status = FRAME_TIMER_END_TRANSMISSION;
/* reset the event register */
- out_be16(&usb->fhci->regs->usb_event, 0xffff);
+ out_be16(&usb->fhci->regs->usb_usber, 0xffff);
/* enable the USB controller */
- out_8(&usb->fhci->regs->usb_mod, mode | USB_MODE_EN);
+ out_8(&usb->fhci->regs->usb_usmod, mode | USB_MODE_EN);
}
/* handles Tx confirm and Tx error interrupt */
@@ -613,7 +613,7 @@ void fhci_host_transmit_actual_frame(struct fhci_usb *usb)
/* start transmit only if we have something in the TDs */
if (in_be16(&td->status) & TD_R)
- out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
+ out_8(&usb->fhci->regs->usb_uscom, USB_CMD_STR_FIFO);
if (in_be32(&ep->conf_td->buf_ptr) == DUMMY_BD_BUFFER) {
out_be32(&old_td->buf_ptr, 0);
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 7116284ed21..154e6a00772 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -20,13 +20,15 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <asm/qe.h>
-#include "../core/hcd.h"
+#include <asm/immap_qe.h>
#define USB_CLOCK 48000000
@@ -81,7 +83,7 @@
#define USB_TD_RX_ER_NONOCT 0x40000000 /* Tx Non Octet Aligned Packet */
#define USB_TD_RX_ER_BITSTUFF 0x20000000 /* Frame Aborted-Received pkt */
#define USB_TD_RX_ER_CRC 0x10000000 /* CRC error */
-#define USB_TD_RX_ER_OVERUN 0x08000000 /* Over - run occured */
+#define USB_TD_RX_ER_OVERUN 0x08000000 /* Over - run occurred */
#define USB_TD_RX_ER_PID 0x04000000 /* wrong PID received */
#define USB_TD_RX_DATA_UNDERUN 0x02000000 /* shorter than expected */
#define USB_TD_RX_DATA_OVERUN 0x01000000 /* longer than expected */
@@ -172,25 +174,6 @@
#define USB_E_TXB_MASK 0x0002
#define USB_E_RXB_MASK 0x0001
-/* Freescale USB Host controller registers */
-struct fhci_regs {
- u8 usb_mod; /* mode register */
- u8 usb_addr; /* address register */
- u8 usb_comm; /* command register */
- u8 reserved1[1];
- __be16 usb_ep[4]; /* endpoint register */
- u8 reserved2[4];
- __be16 usb_event; /* event register */
- u8 reserved3[2];
- __be16 usb_mask; /* mask register */
- u8 reserved4[1];
- u8 usb_status; /* status register */
- __be16 usb_sof_tmr; /* Start Of Frame timer */
- u8 reserved5[2];
- __be16 usb_frame_num; /* frame number register */
- u8 reserved6[1];
-};
-
/* Freescale USB HOST */
struct fhci_pram {
__be16 ep_ptr[4]; /* Endpoint porter reg */
@@ -266,7 +249,7 @@ struct fhci_hcd {
int gpios[NUM_GPIOS];
bool alow_gpios[NUM_GPIOS];
- struct fhci_regs __iomem *regs; /* I/O memory used to communicate */
+ struct qe_usb_ctlr __iomem *regs; /* I/O memory used to communicate */
struct fhci_pram __iomem *pram; /* Parameter RAM */
struct gtm_timer *timer;
@@ -355,14 +338,14 @@ struct ed {
/* read only parameters, should be cleared upon initialization */
u8 toggle_carry; /* toggle carry from the last TD submitted */
- u32 last_iso; /* time stamp of last queued ISO transfer */
+ u16 next_iso; /* time stamp of next queued ISO transfer */
struct td *td_head; /* a pointer to the current TD handled */
};
struct td {
void *data; /* a pointer to the data buffer */
unsigned int len; /* length of the data to be submitted */
- unsigned int actual_len; /* actual bytes transfered on this td */
+ unsigned int actual_len; /* actual bytes transferred on this td */
enum fhci_ta_type type; /* transaction type */
u8 toggle; /* toggle for next trans. within this TD */
u16 iso_index; /* ISO transaction index */
@@ -423,9 +406,9 @@ struct endpoint {
struct usb_td __iomem *td_base; /* first TD in the ring */
struct usb_td __iomem *conf_td; /* next TD for confirm after transac */
struct usb_td __iomem *empty_td;/* next TD for new transaction req. */
- struct kfifo *empty_frame_Q; /* Empty frames list to use */
- struct kfifo *conf_frame_Q; /* frames passed to TDs,waiting for tx */
- struct kfifo *dummy_packets_Q;/* dummy packets for the CRC overun */
+ struct kfifo empty_frame_Q; /* Empty frames list to use */
+ struct kfifo conf_frame_Q; /* frames passed to TDs,waiting for tx */
+ struct kfifo dummy_packets_Q; /* dummy packets for the CRC overrun */
bool already_pushed_dummy_bd;
};
@@ -493,9 +476,9 @@ static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci)
}
/* fifo of pointers */
-static inline struct kfifo *cq_new(int size)
+static inline int cq_new(struct kfifo *fifo, int size)
{
- return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL);
+ return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL);
}
static inline void cq_delete(struct kfifo *kfifo)
@@ -505,19 +488,23 @@ static inline void cq_delete(struct kfifo *kfifo)
static inline unsigned int cq_howmany(struct kfifo *kfifo)
{
- return __kfifo_len(kfifo) / sizeof(void *);
+ return kfifo_len(kfifo) / sizeof(void *);
}
static inline int cq_put(struct kfifo *kfifo, void *p)
{
- return __kfifo_put(kfifo, (void *)&p, sizeof(p));
+ return kfifo_in(kfifo, (void *)&p, sizeof(p));
}
static inline void *cq_get(struct kfifo *kfifo)
{
- void *p = NULL;
+ unsigned int sz;
+ void *p;
+
+ sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
+ if (sz != sizeof(p))
+ return NULL;
- __kfifo_get(kfifo, (void *)&p, sizeof(p));
return p;
}
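
The hunk above converts the fhci pointer queues from the old kfifo interface (kfifo_alloc() returning a struct kfifo *, __kfifo_put/__kfifo_get) to the in-place interface, with the kfifo embedded in struct endpoint. As a minimal sketch of how the converted cq_* helpers are intended to be called, assuming fhci.h is included; the function names my_ep_setup/my_ep_teardown and the kzalloc-based packet allocation are illustrative only and not part of the patch:

#include <linux/slab.h>
#include <linux/kfifo.h>
#include "fhci.h"	/* cq_new(), cq_put(), cq_get(), cq_delete(), struct packet, struct endpoint */

static int my_ep_setup(struct endpoint *ep, int ring_len)
{
	struct packet *pkt;

	/* cq_new() now fills a caller-owned kfifo and returns nonzero on failure */
	if (cq_new(&ep->empty_frame_Q, ring_len + 2))
		return -ENOMEM;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);	/* illustrative allocation */
	if (!pkt) {
		cq_delete(&ep->empty_frame_Q);
		return -ENOMEM;
	}

	/* callers pass the embedded kfifo by address */
	cq_put(&ep->empty_frame_Q, pkt);
	return 0;
}

static void my_ep_teardown(struct endpoint *ep)
{
	/* only drain queues that were actually set up */
	if (!kfifo_initialized(&ep->empty_frame_Q))
		return;

	while (cq_howmany(&ep->empty_frame_Q))
		kfree(cq_get(&ep->empty_frame_Q));
	cq_delete(&ep->empty_frame_Q);
}

This mirrors the call-site changes earlier in the patch, where fhci-tds.c now tests kfifo_initialized() before draining and passes &ep->...Q rather than a pointer member.
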
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
new file mode 100644
index 00000000000..98a89d16cc3
--- /dev/null
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -0,0 +1,5981 @@
+/*
+ * Faraday FOTG210 EHCI-like driver
+ *
+ * Copyright (c) 2013 Faraday Technology Corporation
+ *
+ * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ * Feng-Hsin Chiang <john453@faraday-tech.com>
+ * Po-Yu Chuang <ratbert.chuang@gmail.com>
+ *
+ * Most of code borrowed from the Linux-3.7 EHCI driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+/*-------------------------------------------------------------------------*/
+#define DRIVER_AUTHOR "Yuan-Hsin Chen"
+#define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver"
+
+static const char hcd_name[] = "fotg210_hcd";
+
+#undef FOTG210_URB_TRACE
+
+#define FOTG210_STATS
+
+/* magic numbers that can affect system performance */
+#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define FOTG210_TUNE_RL_TT 0
+#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define FOTG210_TUNE_MULT_TT 1
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh; /* 0 to 6 */
+module_param(log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting: slower than hw default */
+static unsigned park;
+module_param(park, uint, S_IRUGO);
+MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
+
+/* for link power management(LPM) feature */
+static unsigned int hird;
+module_param(hird, int, S_IRUGO);
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+#include "fotg210.h"
+
+/*-------------------------------------------------------------------------*/
+
+#define fotg210_dbg(fotg210, fmt, args...) \
+ dev_dbg(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+#define fotg210_err(fotg210, fmt, args...) \
+ dev_err(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+#define fotg210_info(fotg210, fmt, args...) \
+ dev_info(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+#define fotg210_warn(fotg210, fmt, args...) \
+ dev_warn(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+
+/* check the values in the HCSPARAMS register
+ * (host controller _Structural_ parameters)
+ * see EHCI spec, Table 2-4 for each value
+ */
+static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label)
+{
+ u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
+
+ fotg210_dbg(fotg210,
+ "%s hcs_params 0x%x ports=%d\n",
+ label, params,
+ HCS_N_PORTS(params)
+ );
+}
+
+/* check the values in the HCCPARAMS register
+ * (host controller _Capability_ parameters)
+ * see EHCI Spec, Table 2-5 for each value
+ */
+static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label)
+{
+ u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+ fotg210_dbg(fotg210,
+ "%s hcc_params %04x uframes %s%s\n",
+ label,
+ params,
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "");
+}
+
+static void __maybe_unused
+dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
+{
+ fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+ hc32_to_cpup(fotg210, &qtd->hw_next),
+ hc32_to_cpup(fotg210, &qtd->hw_alt_next),
+ hc32_to_cpup(fotg210, &qtd->hw_token),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
+ if (qtd->hw_buf[1])
+ fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
+ hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
+}
+
+static void __maybe_unused
+dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label,
+ qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+ dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next);
+}
+
+static void __maybe_unused
+dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
+{
+ fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n",
+ label, itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next),
+ itd->urb);
+ fotg210_dbg(fotg210,
+ " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fotg210, itd->hw_transaction[0]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[1]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[2]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[3]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[4]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[5]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[6]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[7]));
+ fotg210_dbg(fotg210,
+ " buf: %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fotg210, itd->hw_bufp[0]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[1]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[2]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[3]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[4]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[5]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[6]));
+ fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n",
+ itd->index[0], itd->index[1], itd->index[2],
+ itd->index[3], itd->index[4], itd->index[5],
+ itd->index[6], itd->index[7]);
+}
+
+static int __maybe_unused
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+ return scnprintf(buf, len,
+ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", status,
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+ (status & STS_HALT) ? " Halt" : "",
+ (status & STS_IAA) ? " IAA" : "",
+ (status & STS_FATAL) ? " FATAL" : "",
+ (status & STS_FLR) ? " FLR" : "",
+ (status & STS_PCD) ? " PCD" : "",
+ (status & STS_ERR) ? " ERR" : "",
+ (status & STS_INT) ? " INT" : ""
+ );
+}
+
+static int __maybe_unused
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+ return scnprintf(buf, len,
+ "%s%sintrenable %02x%s%s%s%s%s%s",
+ label, label[0] ? " " : "", enable,
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+ (enable & STS_PCD) ? " PCD" : "",
+ (enable & STS_ERR) ? " ERR" : "",
+ (enable & STS_INT) ? " INT" : ""
+ );
+}
+
+static const char *const fls_strings[] = { "1024", "512", "256", "??" };
+
+static int
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{
+ return scnprintf(buf, len,
+ "%s%scommand %07x %s=%d ithresh=%d%s%s%s "
+ "period=%s%s %s",
+ label, label[0] ? " " : "", command,
+ (command & CMD_PARK) ? " park" : "(park)",
+ CMD_PARK_CNT(command),
+ (command >> 16) & 0x3f,
+ (command & CMD_IAAD) ? " IAAD" : "",
+ (command & CMD_ASE) ? " Async" : "",
+ (command & CMD_PSE) ? " Periodic" : "",
+ fls_strings[(command >> 2) & 0x3],
+ (command & CMD_RESET) ? " Reset" : "",
+ (command & CMD_RUN) ? "RUN" : "HALT"
+ );
+}
+
+static char
+*dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{
+ char *sig;
+
+ /* signaling state */
+ switch (status & (3 << 10)) {
+ case 0 << 10:
+ sig = "se0";
+ break;
+ case 1 << 10:
+ sig = "k";
+ break; /* low speed */
+ case 2 << 10:
+ sig = "j";
+ break;
+ default:
+ sig = "?";
+ break;
+ }
+
+ scnprintf(buf, len,
+ "%s%sport:%d status %06x %d "
+ "sig=%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", port, status,
+ status>>25,/*device address */
+ sig,
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+ (status & PORT_PEC) ? " PEC" : "",
+ (status & PORT_PE) ? " PE" : "",
+ (status & PORT_CSC) ? " CSC" : "",
+ (status & PORT_CONNECT) ? " CONNECT" : "");
+ return buf;
+}
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(fotg210, label, status) { \
+ char _buf[80]; \
+ dbg_status_buf(_buf, sizeof(_buf), label, status); \
+ fotg210_dbg(fotg210, "%s\n", _buf); \
+}
+
+#define dbg_cmd(fotg210, label, command) { \
+ char _buf[80]; \
+ dbg_command_buf(_buf, sizeof(_buf), label, command); \
+ fotg210_dbg(fotg210, "%s\n", _buf); \
+}
+
+#define dbg_port(fotg210, label, port, status) { \
+ char _buf[80]; \
+ fotg210_dbg(fotg210, "%s\n", dbg_port_buf(_buf, sizeof(_buf), label, port, status)); \
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* troubleshooting help: expose state in debugfs */
+
+static int debug_async_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+
+static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_async_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_periodic_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_periodic_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_registers_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_registers_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+
+static struct dentry *fotg210_debug_root;
+
+struct debug_buffer {
+ ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
+ struct usb_bus *bus;
+ struct mutex mutex; /* protect filling of buffer */
+ size_t count; /* number of characters filled into buffer */
+ char *output_buf;
+ size_t alloc_size;
+};
+
+#define speed_char(info1)({ char tmp; \
+ switch (info1 & (3 << 12)) { \
+ case QH_FULL_SPEED: \
+ tmp = 'f'; break; \
+ case QH_LOW_SPEED: \
+ tmp = 'l'; break; \
+ case QH_HIGH_SPEED: \
+ tmp = 'h'; break; \
+ default: \
+ tmp = '?'; break; \
+ } tmp; })
+
+static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
+{
+ __u32 v = hc32_to_cpu(fotg210, token);
+
+ if (v & QTD_STS_ACTIVE)
+ return '*';
+ if (v & QTD_STS_HALT)
+ return '-';
+ if (!IS_SHORT_READ(v))
+ return ' ';
+ /* tries to advance through hw_alt_next */
+ return '/';
+}
+
+static void qh_lines(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh,
+ char **nextp,
+ unsigned *sizep
+)
+{
+ u32 scratch;
+ u32 hw_curr;
+ struct fotg210_qtd *td;
+ unsigned temp;
+ unsigned size = *sizep;
+ char *next = *nextp;
+ char mark;
+ __le32 list_end = FOTG210_LIST_END(fotg210);
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ if (hw->hw_qtd_next == list_end) /* NEC does this */
+ mark = '@';
+ else
+ mark = token_mark(fotg210, hw->hw_token);
+ if (mark == '/') { /* qh_alt_next controls qh advance? */
+ if ((hw->hw_alt_next & QTD_MASK(fotg210))
+ == fotg210->async->hw->hw_alt_next)
+ mark = '#'; /* blocked */
+ else if (hw->hw_alt_next == list_end)
+ mark = '.'; /* use hw_qtd_next */
+ /* else alt_next points to some other qtd */
+ }
+ scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
+ hw_curr = (mark == '*') ? hc32_to_cpup(fotg210, &hw->hw_current) : 0;
+ temp = scnprintf(next, size,
+ "qh/%p dev%d %cs ep%d %08x %08x(%08x%c %s nak%d)",
+ qh, scratch & 0x007f,
+ speed_char(scratch),
+ (scratch >> 8) & 0x000f,
+ scratch, hc32_to_cpup(fotg210, &hw->hw_info2),
+ hc32_to_cpup(fotg210, &hw->hw_token), mark,
+ (cpu_to_hc32(fotg210, QTD_TOGGLE) & hw->hw_token)
+ ? "data1" : "data0",
+ (hc32_to_cpup(fotg210, &hw->hw_alt_next) >> 1) & 0x0f);
+ size -= temp;
+ next += temp;
+
+ /* hc may be modifying the list as we read it ... */
+ list_for_each_entry(td, &qh->qtd_list, qtd_list) {
+ scratch = hc32_to_cpup(fotg210, &td->hw_token);
+ mark = ' ';
+ if (hw_curr == td->qtd_dma)
+ mark = '*';
+ else if (hw->hw_qtd_next == cpu_to_hc32(fotg210, td->qtd_dma))
+ mark = '+';
+ else if (QTD_LENGTH(scratch)) {
+ if (td->hw_alt_next == fotg210->async->hw->hw_alt_next)
+ mark = '#';
+ else if (td->hw_alt_next != list_end)
+ mark = '/';
+ }
+ temp = snprintf(next, size,
+ "\n\t%p%c%s len=%d %08x urb %p",
+ td, mark, ({ char *tmp;
+ switch ((scratch>>8)&0x03) {
+ case 0:
+ tmp = "out";
+ break;
+ case 1:
+ tmp = "in";
+ break;
+ case 2:
+ tmp = "setup";
+ break;
+ default:
+ tmp = "?";
+ break;
+ } tmp; }),
+ (scratch >> 16) & 0x7fff,
+ scratch,
+ td->urb);
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+ if (temp == size)
+ goto done;
+ }
+
+ temp = snprintf(next, size, "\n");
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+
+done:
+ *sizep = size;
+ *nextp = next;
+}
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ unsigned temp, size;
+ char *next;
+ struct fotg210_qh *qh;
+
+ hcd = bus_to_hcd(buf->bus);
+ fotg210 = hcd_to_fotg210(hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ *next = 0;
+
+ /* dumps a snapshot of the async schedule.
+ * usually empty except for long-term bulk reads, or head.
+ * one QH per line, and TDs we know about
+ */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ for (qh = fotg210->async->qh_next.qh; size > 0 && qh;
+ qh = qh->qh_next.qh)
+ qh_lines(fotg210, qh, &next, &size);
+ if (fotg210->async_unlink && size > 0) {
+ temp = scnprintf(next, size, "\nunlink =\n");
+ size -= temp;
+ next += temp;
+
+ for (qh = fotg210->async_unlink; size > 0 && qh;
+ qh = qh->unlink_next)
+ qh_lines(fotg210, qh, &next, &size);
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ return strlen(buf->output_buf);
+}
+
+#define DBG_SCHED_LIMIT 64
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ union fotg210_shadow p, *seen;
+ unsigned temp, size, seen_count;
+ char *next;
+ unsigned i;
+ __hc32 tag;
+
+ seen = kmalloc(DBG_SCHED_LIMIT * sizeof(*seen), GFP_ATOMIC);
+ if (!seen)
+ return 0;
+ seen_count = 0;
+
+ hcd = bus_to_hcd(buf->bus);
+ fotg210 = hcd_to_fotg210(hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ temp = scnprintf(next, size, "size = %d\n", fotg210->periodic_size);
+ size -= temp;
+ next += temp;
+
+ /* dump a snapshot of the periodic schedule.
+ * iso changes, interrupt usually doesn't.
+ */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ for (i = 0; i < fotg210->periodic_size; i++) {
+ p = fotg210->pshadow[i];
+ if (likely(!p.ptr))
+ continue;
+ tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]);
+
+ temp = scnprintf(next, size, "%4d: ", i);
+ size -= temp;
+ next += temp;
+
+ do {
+ struct fotg210_qh_hw *hw;
+
+ switch (hc32_to_cpu(fotg210, tag)) {
+ case Q_TYPE_QH:
+ hw = p.qh->hw;
+ temp = scnprintf(next, size, " qh%d-%04x/%p",
+ p.qh->period,
+ hc32_to_cpup(fotg210,
+ &hw->hw_info2)
+ /* uframe masks */
+ & (QH_CMASK | QH_SMASK),
+ p.qh);
+ size -= temp;
+ next += temp;
+ /* don't repeat what follows this qh */
+ for (temp = 0; temp < seen_count; temp++) {
+ if (seen[temp].ptr != p.ptr)
+ continue;
+ if (p.qh->qh_next.ptr) {
+ temp = scnprintf(next, size,
+ " ...");
+ size -= temp;
+ next += temp;
+ }
+ break;
+ }
+ /* show more info the first time around */
+ if (temp == seen_count) {
+ u32 scratch = hc32_to_cpup(fotg210,
+ &hw->hw_info1);
+ struct fotg210_qtd *qtd;
+ char *type = "";
+
+ /* count tds, get ep direction */
+ temp = 0;
+ list_for_each_entry(qtd,
+ &p.qh->qtd_list,
+ qtd_list) {
+ temp++;
+ switch (0x03 & (hc32_to_cpu(
+ fotg210,
+ qtd->hw_token) >> 8)) {
+ case 0:
+ type = "out";
+ continue;
+ case 1:
+ type = "in";
+ continue;
+ }
+ }
+
+ temp = scnprintf(next, size,
+ "(%c%d ep%d%s "
+ "[%d/%d] q%d p%d)",
+ speed_char(scratch),
+ scratch & 0x007f,
+ (scratch >> 8) & 0x000f, type,
+ p.qh->usecs, p.qh->c_usecs,
+ temp,
+ 0x7ff & (scratch >> 16));
+
+ if (seen_count < DBG_SCHED_LIMIT)
+ seen[seen_count++].qh = p.qh;
+ } else
+ temp = 0;
+ tag = Q_NEXT_TYPE(fotg210, hw->hw_next);
+ p = p.qh->qh_next;
+ break;
+ case Q_TYPE_FSTN:
+ temp = scnprintf(next, size,
+ " fstn-%8x/%p", p.fstn->hw_prev,
+ p.fstn);
+ tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next);
+ p = p.fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ temp = scnprintf(next, size,
+ " itd/%p", p.itd);
+ tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next);
+ p = p.itd->itd_next;
+ break;
+ }
+ size -= temp;
+ next += temp;
+ } while (p.ptr);
+
+ temp = scnprintf(next, size, "\n");
+ size -= temp;
+ next += temp;
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ kfree(seen);
+
+ return buf->alloc_size - size;
+}
+#undef DBG_SCHED_LIMIT
+
+static const char *rh_state_string(struct fotg210_hcd *fotg210)
+{
+ switch (fotg210->rh_state) {
+ case FOTG210_RH_HALTED:
+ return "halted";
+ case FOTG210_RH_SUSPENDED:
+ return "suspended";
+ case FOTG210_RH_RUNNING:
+ return "running";
+ case FOTG210_RH_STOPPING:
+ return "stopping";
+ }
+ return "?";
+}
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ unsigned temp, size, i;
+ char *next, scratch[80];
+ static const char fmt[] = "%*s\n";
+ static const char label[] = "";
+
+ hcd = bus_to_hcd(buf->bus);
+ fotg210 = hcd_to_fotg210(hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ size = scnprintf(next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "SUSPENDED(no register access)\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc);
+ goto done;
+ }
+
+ /* Capability Registers */
+ i = HC_VERSION(fotg210, fotg210_readl(fotg210,
+ &fotg210->caps->hc_capbase));
+ temp = scnprintf(next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "EHCI %x.%02x, rh state %s\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc,
+ i >> 8, i & 0x0ff, rh_state_string(fotg210));
+ size -= temp;
+ next += temp;
+
+ /* FIXME interpret both types of params */
+ i = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
+ temp = scnprintf(next, size, "structural params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ i = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+ temp = scnprintf(next, size, "capability params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ /* Operational Registers */
+ temp = dbg_status_buf(scratch, sizeof(scratch), label,
+ fotg210_readl(fotg210, &fotg210->regs->status));
+ temp = scnprintf(next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_command_buf(scratch, sizeof(scratch), label,
+ fotg210_readl(fotg210, &fotg210->regs->command));
+ temp = scnprintf(next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_intr_buf(scratch, sizeof(scratch), label,
+ fotg210_readl(fotg210, &fotg210->regs->intr_enable));
+ temp = scnprintf(next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size, "uframe %04x\n",
+ fotg210_read_frame_index(fotg210));
+ size -= temp;
+ next += temp;
+
+ if (fotg210->async_unlink) {
+ temp = scnprintf(next, size, "async unlink qh %p\n",
+ fotg210->async_unlink);
+ size -= temp;
+ next += temp;
+ }
+
+#ifdef FOTG210_STATS
+ temp = scnprintf(next, size,
+ "irq normal %ld err %ld iaa %ld(lost %ld)\n",
+ fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa,
+ fotg210->stats.lost_iaa);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size, "complete %ld unlink %ld\n",
+ fotg210->stats.complete, fotg210->stats.unlink);
+ size -= temp;
+ next += temp;
+#endif
+
+done:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ return buf->alloc_size - size;
+}
+
+static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
+ ssize_t (*fill_func)(struct debug_buffer *))
+{
+ struct debug_buffer *buf;
+
+ buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+
+ if (buf) {
+ buf->bus = bus;
+ buf->fill_func = fill_func;
+ mutex_init(&buf->mutex);
+ buf->alloc_size = PAGE_SIZE;
+ }
+
+ return buf;
+}
+
+static int fill_buffer(struct debug_buffer *buf)
+{
+ int ret = 0;
+
+ if (!buf->output_buf)
+ buf->output_buf = vmalloc(buf->alloc_size);
+
+ if (!buf->output_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = buf->fill_func(buf);
+
+ if (ret >= 0) {
+ buf->count = ret;
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+ size_t len, loff_t *offset)
+{
+ struct debug_buffer *buf = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&buf->mutex);
+ if (buf->count == 0) {
+ ret = fill_buffer(buf);
+ if (ret != 0) {
+ mutex_unlock(&buf->mutex);
+ goto out;
+ }
+ }
+ mutex_unlock(&buf->mutex);
+
+ ret = simple_read_from_buffer(user_buf, len, offset,
+ buf->output_buf, buf->count);
+
+out:
+ return ret;
+
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf = file->private_data;
+
+ if (buf) {
+ vfree(buf->output_buf);
+ kfree(buf);
+ }
+
+ return 0;
+}
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf;
+ buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
+ file->private_data = buf;
+ return 0;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_registers_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files(struct fotg210_hcd *fotg210)
+{
+ struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
+
+ fotg210->debug_dir = debugfs_create_dir(bus->bus_name,
+ fotg210_debug_root);
+ if (!fotg210->debug_dir)
+ return;
+
+ if (!debugfs_create_file("async", S_IRUGO, fotg210->debug_dir, bus,
+ &debug_async_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("periodic", S_IRUGO, fotg210->debug_dir, bus,
+ &debug_periodic_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("registers", S_IRUGO, fotg210->debug_dir, bus,
+ &debug_registers_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(fotg210->debug_dir);
+}
+
+static inline void remove_debug_files(struct fotg210_hcd *fotg210)
+{
+ debugfs_remove_recursive(fotg210->debug_dir);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: "usec" have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown: shutting down the bridge before the devices using it.
+ */
+static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = fotg210_readl(fotg210, ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fotg210_halt(struct fotg210_hcd *fotg210)
+{
+ u32 temp;
+
+ spin_lock_irq(&fotg210->lock);
+
+ /* disable any irqs left enabled by previous code */
+ fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+
+ /*
+ * This routine gets called during probe before fotg210->command
+ * has been initialized, so we can't rely on its value.
+ */
+ fotg210->command &= ~CMD_RUN;
+ temp = fotg210_readl(fotg210, &fotg210->regs->command);
+ temp &= ~(CMD_RUN | CMD_IAAD);
+ fotg210_writel(fotg210, temp, &fotg210->regs->command);
+
+ spin_unlock_irq(&fotg210->lock);
+ synchronize_irq(fotg210_to_hcd(fotg210)->irq);
+
+ return handshake(fotg210, &fotg210->regs->status,
+ STS_HALT, STS_HALT, 16 * 125);
+}
+
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fotg210_reset(struct fotg210_hcd *fotg210)
+{
+ int retval;
+ u32 command = fotg210_readl(fotg210, &fotg210->regs->command);
+
+ /* If the EHCI debug controller is active, special care must be
+ * taken before and after a host controller reset */
+ if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210)))
+ fotg210->debug = NULL;
+
+ command |= CMD_RESET;
+ dbg_cmd(fotg210, "reset", command);
+ fotg210_writel(fotg210, command, &fotg210->regs->command);
+ fotg210->rh_state = FOTG210_RH_HALTED;
+ fotg210->next_statechange = jiffies;
+ retval = handshake(fotg210, &fotg210->regs->command,
+ CMD_RESET, 0, 250 * 1000);
+
+ if (retval)
+ return retval;
+
+ if (fotg210->debug)
+ dbgp_external_startup(fotg210_to_hcd(fotg210));
+
+ fotg210->port_c_suspend = fotg210->suspended_ports =
+ fotg210->resuming_ports = 0;
+ return retval;
+}
+
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fotg210_quiesce(struct fotg210_hcd *fotg210)
+{
+ u32 temp;
+
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ /* wait for any schedule enables/disables to take effect */
+ temp = (fotg210->command << 10) & (STS_ASS | STS_PSS);
+ handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp,
+ 16 * 125);
+
+ /* then disable anything that's still active */
+ spin_lock_irq(&fotg210->lock);
+ fotg210->command &= ~(CMD_ASE | CMD_PSE);
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+ spin_unlock_irq(&fotg210->lock);
+
+ /* hardware can take 16 microframes to turn off ... */
+ handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0,
+ 16 * 125);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void end_unlink_async(struct fotg210_hcd *fotg210);
+static void unlink_empty_async(struct fotg210_hcd *fotg210);
+static void fotg210_work(struct fotg210_hcd *fotg210);
+static void start_unlink_intr(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh);
+static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit)
+{
+ fotg210->command |= bit;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+
+ /* unblock posted write */
+ fotg210_readl(fotg210, &fotg210->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit)
+{
+ fotg210->command &= ~bit;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+
+ /* unblock posted write */
+ fotg210_readl(fotg210, &fotg210->regs->command);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support... Now using hrtimers.
+ *
+ * Lots of different events are triggered from fotg210->hrtimer. Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * fotg210->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * fotg210->next_hrtimer_event. Whenever fotg210->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay. This doesn't
+ * matter, because none of the events are especially time-critical. The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay. In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum fotg210_hrtimer_event in fotg210.h.
+ */
+static unsigned event_delays_ns[] = {
+ 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_ASS */
+ 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_PSS */
+ 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_DEAD */
+ 1125 * NSEC_PER_USEC, /* FOTG210_HRTIMER_UNLINK_INTR */
+ 2 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_FREE_ITDS */
+ 6 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
+ 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IAA_WATCHDOG */
+ 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
+ 15 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_ASYNC */
+ 100 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
+ bool resched)
+{
+ ktime_t *timeout = &fotg210->hr_timeouts[event];
+
+ if (resched)
+ *timeout = ktime_add(ktime_get(),
+ ktime_set(0, event_delays_ns[event]));
+ fotg210->enabled_hrtimer_events |= (1 << event);
+
+ /* Track only the lowest-numbered pending event */
+ if (event < fotg210->next_hrtimer_event) {
+ fotg210->next_hrtimer_event = event;
+ hrtimer_start_range_ns(&fotg210->hrtimer, *timeout,
+ NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+ }
+}
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
+{
+ unsigned actual, want;
+
+ /* Don't enable anything if the controller isn't running (e.g., died) */
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ want = (fotg210->command & CMD_ASE) ? STS_ASS : 0;
+ actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_ASS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fotg210->ASS_poll_count++ < 20) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS,
+ true);
+ return;
+ }
+ fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fotg210->ASS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fotg210->async_count > 0)
+ fotg210_set_command_bit(fotg210, CMD_ASE);
+
+ } else { /* Running */
+ if (fotg210->async_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_DISABLE_ASYNC,
+ true);
+ }
+ }
+}
+
+/* Turn off the async schedule after a brief delay */
+static void fotg210_disable_ASE(struct fotg210_hcd *fotg210)
+{
+ fotg210_clear_command_bit(fotg210, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
+{
+ unsigned actual, want;
+
+ /* Don't do anything if the controller isn't running (e.g., died) */
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ want = (fotg210->command & CMD_PSE) ? STS_PSS : 0;
+ actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_PSS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fotg210->PSS_poll_count++ < 20) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS,
+ true);
+ return;
+ }
+ fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fotg210->PSS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fotg210->periodic_count > 0)
+ fotg210_set_command_bit(fotg210, CMD_PSE);
+
+ } else { /* Running */
+ if (fotg210->periodic_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_DISABLE_PERIODIC,
+ true);
+ }
+ }
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void fotg210_disable_PSE(struct fotg210_hcd *fotg210)
+{
+ fotg210_clear_command_bit(fotg210, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210)
+{
+ if (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT)) {
+
+ /* Give up after a few milliseconds */
+ if (fotg210->died_poll_count++ < 5) {
+ /* Try again later */
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_POLL_DEAD, true);
+ return;
+ }
+ fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n");
+ }
+
+ /* Clean up the mess */
+ fotg210->rh_state = FOTG210_RH_HALTED;
+ fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+ fotg210_work(fotg210);
+ end_unlink_async(fotg210);
+
+ /* Not in process context, so don't try to reset the controller */
+}
+
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
+{
+ bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ fotg210->intr_unlinking = true;
+ while (fotg210->intr_unlink) {
+ struct fotg210_qh *qh = fotg210->intr_unlink;
+
+ if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle)
+ break;
+ fotg210->intr_unlink = qh->unlink_next;
+ qh->unlink_next = NULL;
+ end_unlink_intr(fotg210, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (fotg210->intr_unlink) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
+ true);
+ ++fotg210->intr_unlink_cycle;
+ }
+ fotg210->intr_unlinking = false;
+}
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct fotg210_hcd *fotg210)
+{
+ if (!(fotg210->enabled_hrtimer_events &
+ BIT(FOTG210_HRTIMER_FREE_ITDS))) {
+ fotg210->last_itd_to_free = list_entry(
+ fotg210->cached_itd_list.prev,
+ struct fotg210_itd, itd_list);
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_FREE_ITDS, true);
+ }
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_itd *itd, *n;
+
+ if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ fotg210->last_itd_to_free = NULL;
+
+ list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) {
+ list_del(&itd->itd_list);
+ dma_pool_free(fotg210->itd_pool, itd, itd->itd_dma);
+ if (itd == fotg210->last_itd_to_free)
+ break;
+ }
+
+ if (!list_empty(&fotg210->cached_itd_list))
+ start_free_itds(fotg210);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ /*
+ * Lost IAA irqs wedge things badly; seen first with a vt8235.
+ * So we need this watchdog, but must protect it against both
+ * (a) SMP races against real IAA firing and retriggering, and
+ * (b) clean HC shutdown, when IAA watchdog was pending.
+ */
+ if (fotg210->async_iaa) {
+ u32 cmd, status;
+
+ /* If we get here, IAA is *REALLY* late. It's barely
+ * conceivable that the system is so busy that CMD_IAAD
+ * is still legitimately set, so let's be sure it's
+ * clear before we read STS_IAA. (The HC should clear
+ * CMD_IAAD when it sets STS_IAA.)
+ */
+ cmd = fotg210_readl(fotg210, &fotg210->regs->command);
+
+ /*
+ * If IAA is set here it either legitimately triggered
+ * after the watchdog timer expired (_way_ late, so we'll
+ * still count it as lost) ... or a silicon erratum:
+ * - VIA seems to set IAA without triggering the IRQ;
+ * - IAAD potentially cleared without setting IAA.
+ */
+ status = fotg210_readl(fotg210, &fotg210->regs->status);
+ if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+ COUNT(fotg210->stats.lost_iaa);
+ fotg210_writel(fotg210, STS_IAA,
+ &fotg210->regs->status);
+ }
+
+ fotg210_dbg(fotg210, "IAA watchdog: status %x cmd %x\n",
+ status, cmd);
+ end_unlink_async(fotg210);
+ }
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct fotg210_hcd *fotg210)
+{
+ /* Not needed if the controller isn't running or it's already enabled */
+ if (fotg210->rh_state != FOTG210_RH_RUNNING ||
+ (fotg210->enabled_hrtimer_events &
+ BIT(FOTG210_HRTIMER_IO_WATCHDOG)))
+ return;
+
+ /*
+ * Isochronous transfers always need the watchdog.
+ * For other sorts we use it only if the flag is set.
+ */
+ if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog &&
+ fotg210->async_count + fotg210->intr_count > 0))
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG,
+ true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum fotg210_hrtimer_event in fotg210.h.
+ */
+static void (*event_handlers[])(struct fotg210_hcd *) = {
+ fotg210_poll_ASS, /* FOTG210_HRTIMER_POLL_ASS */
+ fotg210_poll_PSS, /* FOTG210_HRTIMER_POLL_PSS */
+ fotg210_handle_controller_death, /* FOTG210_HRTIMER_POLL_DEAD */
+ fotg210_handle_intr_unlinks, /* FOTG210_HRTIMER_UNLINK_INTR */
+ end_free_itds, /* FOTG210_HRTIMER_FREE_ITDS */
+ unlink_empty_async, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
+ fotg210_iaa_watchdog, /* FOTG210_HRTIMER_IAA_WATCHDOG */
+ fotg210_disable_PSE, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
+ fotg210_disable_ASE, /* FOTG210_HRTIMER_DISABLE_ASYNC */
+ fotg210_work, /* FOTG210_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
+{
+ struct fotg210_hcd *fotg210 =
+ container_of(t, struct fotg210_hcd, hrtimer);
+ ktime_t now;
+ unsigned long events;
+ unsigned long flags;
+ unsigned e;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ events = fotg210->enabled_hrtimer_events;
+ fotg210->enabled_hrtimer_events = 0;
+ fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
+
+ /*
+ * Check each pending event. If its time has expired, handle
+ * the event; otherwise re-enable it.
+ */
+ now = ktime_get();
+ for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
+ if (now.tv64 >= fotg210->hr_timeouts[e].tv64)
+ event_handlers[e](fotg210);
+ else
+ fotg210_enable_event(fotg210, e, false);
+ }
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return HRTIMER_NORESTART;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define fotg210_bus_suspend NULL
+#define fotg210_bus_resume NULL
+
+/*-------------------------------------------------------------------------*/
+
+static int check_reset_complete(
+ struct fotg210_hcd *fotg210,
+ int index,
+ u32 __iomem *status_reg,
+ int port_status
+) {
+ if (!(port_status & PORT_CONNECT))
+ return port_status;
+
+ /* if reset finished and it's still not enabled -- handoff */
+ if (!(port_status & PORT_PE)) {
+ /* with integrated TT, there's nobody to hand it to! */
+ fotg210_dbg(fotg210,
+ "Failed to enable port %d on root hub TT\n",
+ index+1);
+ return port_status;
+ } else {
+ fotg210_dbg(fotg210, "port %d reset complete, port enabled\n",
+ index + 1);
+ }
+
+ return port_status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+static int
+fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp, status;
+ u32 mask;
+ int retval = 1;
+ unsigned long flags;
+
+ /* init status to no-changes */
+ buf[0] = 0;
+
+ /* Inform the core about resumes-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
+ status = fotg210->resuming_ports;
+
+ mask = PORT_CSC | PORT_PEC;
+ /* PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND */
+
+ /* no hub change reports (bit 0) for now (power, ...) */
+
+ /* port N changes (bit N)? */
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ temp = fotg210_readl(fotg210, &fotg210->regs->port_status);
+
+ /*
+ * Return status information even for ports with OWNER set.
+ * Otherwise khubd wouldn't see the disconnect event when a
+ * high-speed device is switched over to the companion
+ * controller by the user.
+ */
+
+ if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend)
+ || (fotg210->reset_done[0] && time_after_eq(
+ jiffies, fotg210->reset_done[0]))) {
+ buf[0] |= 1 << 1;
+ status = STS_PCD;
+ }
+ /* FIXME autosuspend idle root hubs */
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return status ? retval : 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+fotg210_hub_descriptor(
+ struct fotg210_hcd *fotg210,
+ struct usb_hub_descriptor *desc
+) {
+ int ports = HCS_N_PORTS(fotg210->hcs_params);
+ u16 temp;
+
+ desc->bDescriptorType = 0x29;
+ desc->bPwrOn2PwrGood = 10; /* fotg210 1.0, 2.3.9 says 20ms max */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = ports;
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
+
+ /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+ temp = 0x0008; /* per-port overcurrent reporting */
+ temp |= 0x0002; /* no power switching */
+ desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int fotg210_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+) {
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ int ports = HCS_N_PORTS(fotg210->hcs_params);
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+ u32 temp, temp1, status;
+ unsigned long flags;
+ int retval = 0;
+ unsigned selector;
+
+ /*
+ * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+ * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+ * (track current state ourselves) ... blink for diagnostics,
+ * power, "this is the one", etc. EHCI spec supports this.
+ */
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fotg210_readl(fotg210, status_reg);
+ temp &= ~PORT_RWC_BITS;
+
+ /*
+ * Even if OWNER is set, so the port is owned by the
+ * companion controller, khubd needs to be able to clear
+ * the port-change status bits (especially
+ * USB_PORT_STAT_C_CONNECTION).
+ */
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ fotg210_writel(fotg210, temp & ~PORT_PE, status_reg);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ fotg210_writel(fotg210, temp | PORT_PEC, status_reg);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ if (temp & PORT_RESET)
+ goto error;
+ if (!(temp & PORT_SUSPEND))
+ break;
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+ /* resume signaling for 20 msec */
+ fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+ fotg210->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fotg210->port_c_suspend);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ fotg210_writel(fotg210, temp | PORT_CSC, status_reg);
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ fotg210_writel(fotg210, temp | OTGISR_OVC,
+ &fotg210->regs->otgisr);
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* GetPortStatus clears reset */
+ break;
+ default:
+ goto error;
+ }
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ break;
+ case GetHubDescriptor:
+ fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *)
+ buf);
+ break;
+ case GetHubStatus:
+ /* no hub-wide feature/status flags */
+ memset(buf, 0, 4);
+ /*cpu_to_le32s ((u32 *) buf); */
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ status = 0;
+ temp = fotg210_readl(fotg210, status_reg);
+
+ /* wPortChange bits */
+ if (temp & PORT_CSC)
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+ temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
+ if (temp1 & OTGISR_OVC)
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /* whoever resumes must GetPortStatus to complete it!! */
+ if (temp & PORT_RESUME) {
+
+ /* Remote Wakeup received? */
+ if (!fotg210->reset_done[wIndex]) {
+ /* resume signaling for 20 msec */
+ fotg210->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ /* check the port again */
+ mod_timer(&fotg210_to_hcd(fotg210)->rh_timer,
+ fotg210->reset_done[wIndex]);
+ }
+
+ /* resume completed? */
+ else if (time_after_eq(jiffies,
+ fotg210->reset_done[wIndex])) {
+ clear_bit(wIndex, &fotg210->suspended_ports);
+ set_bit(wIndex, &fotg210->port_c_suspend);
+ fotg210->reset_done[wIndex] = 0;
+
+ /* stop resume signaling */
+ temp = fotg210_readl(fotg210, status_reg);
+ fotg210_writel(fotg210,
+ temp & ~(PORT_RWC_BITS | PORT_RESUME),
+ status_reg);
+ clear_bit(wIndex, &fotg210->resuming_ports);
+ retval = handshake(fotg210, status_reg,
+ PORT_RESUME, 0, 2000 /* 2msec */);
+ if (retval != 0) {
+ fotg210_err(fotg210,
+ "port %d resume error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+ temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+ }
+ }
+
+ /* whoever resets must GetPortStatus to complete it!! */
+ if ((temp & PORT_RESET)
+ && time_after_eq(jiffies,
+ fotg210->reset_done[wIndex])) {
+ status |= USB_PORT_STAT_C_RESET << 16;
+ fotg210->reset_done[wIndex] = 0;
+ clear_bit(wIndex, &fotg210->resuming_ports);
+
+ /* force reset to complete */
+ fotg210_writel(fotg210,
+ temp & ~(PORT_RWC_BITS | PORT_RESET),
+ status_reg);
+ /* REVISIT: some hardware needs 550+ usec to clear
+ * this bit; seems too long to spin routinely...
+ */
+ retval = handshake(fotg210, status_reg,
+ PORT_RESET, 0, 1000);
+ if (retval != 0) {
+ fotg210_err(fotg210, "port %d reset error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+
+ /* see what we found out */
+ temp = check_reset_complete(fotg210, wIndex, status_reg,
+ fotg210_readl(fotg210, status_reg));
+ }
+
+ if (!(temp & (PORT_RESUME|PORT_RESET))) {
+ fotg210->reset_done[wIndex] = 0;
+ clear_bit(wIndex, &fotg210->resuming_ports);
+ }
+
+ /* transfer dedicated ports to the companion hc */
+ if ((temp & PORT_CONNECT) &&
+ test_bit(wIndex, &fotg210->companion_ports)) {
+ temp &= ~PORT_RWC_BITS;
+ fotg210_writel(fotg210, temp, status_reg);
+ fotg210_dbg(fotg210, "port %d --> companion\n",
+ wIndex + 1);
+ temp = fotg210_readl(fotg210, status_reg);
+ }
+
+ /*
+ * Even if OWNER is set, there's no harm letting khubd
+ * see the wPortStatus values (they should all be 0 except
+ * for PORT_POWER anyway).
+ */
+
+ if (temp & PORT_CONNECT) {
+ status |= USB_PORT_STAT_CONNECTION;
+ status |= fotg210_port_speed(fotg210, temp);
+ }
+ if (temp & PORT_PE)
+ status |= USB_PORT_STAT_ENABLE;
+
+ /* maybe the port was unsuspended without our knowledge */
+ if (temp & (PORT_SUSPEND|PORT_RESUME)) {
+ status |= USB_PORT_STAT_SUSPEND;
+ } else if (test_bit(wIndex, &fotg210->suspended_ports)) {
+ clear_bit(wIndex, &fotg210->suspended_ports);
+ clear_bit(wIndex, &fotg210->resuming_ports);
+ fotg210->reset_done[wIndex] = 0;
+ if (temp & PORT_PE)
+ set_bit(wIndex, &fotg210->port_c_suspend);
+ }
+
+ temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
+ if (temp1 & OTGISR_OVC)
+ status |= USB_PORT_STAT_OVERCURRENT;
+ if (temp & PORT_RESET)
+ status |= USB_PORT_STAT_RESET;
+ if (test_bit(wIndex, &fotg210->port_c_suspend))
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+ dbg_port(fotg210, "GetStatus", wIndex + 1, temp);
+ put_unaligned_le32(status, buf);
+ break;
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case SetPortFeature:
+ selector = wIndex >> 8;
+ wIndex &= 0xff;
+
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fotg210_readl(fotg210, status_reg);
+ temp &= ~PORT_RWC_BITS;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ if ((temp & PORT_PE) == 0
+ || (temp & PORT_RESET) != 0)
+ goto error;
+
+			/* After the above check the port must be connected.
+			 * Setting the appropriate bit could put the phy into
+			 * low power mode if we have the hostpc feature.
+			 */
+ fotg210_writel(fotg210, temp | PORT_SUSPEND,
+ status_reg);
+ set_bit(wIndex, &fotg210->suspended_ports);
+ break;
+ case USB_PORT_FEAT_RESET:
+ if (temp & PORT_RESUME)
+ goto error;
+ /* line status bits may report this as low speed,
+ * which can be fine if this root hub has a
+ * transaction translator built in.
+ */
+ fotg210_dbg(fotg210, "port %d reset\n", wIndex + 1);
+ temp |= PORT_RESET;
+ temp &= ~PORT_PE;
+
+ /*
+ * caller must wait, then call GetPortStatus
+ * usb 2.0 spec says 50 ms resets on root
+ */
+ fotg210->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(50);
+ fotg210_writel(fotg210, temp, status_reg);
+ break;
+
+ /* For downstream facing ports (these): one hub port is put
+ * into test mode according to USB2 11.24.2.13, then the hub
+ * must be reset (which for root hub now means rmmod+modprobe,
+ * or else system reboot). See EHCI 2.3.9 and 4.14 for info
+ * about the EHCI-specific stuff.
+ */
+ case USB_PORT_FEAT_TEST:
+ if (!selector || selector > 5)
+ goto error;
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ fotg210_quiesce(fotg210);
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ /* Put all enabled ports into suspend */
+ temp = fotg210_readl(fotg210, status_reg) &
+ ~PORT_RWC_BITS;
+ if (temp & PORT_PE)
+ fotg210_writel(fotg210, temp | PORT_SUSPEND,
+ status_reg);
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ fotg210_halt(fotg210);
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ temp = fotg210_readl(fotg210, status_reg);
+ temp |= selector << 16;
+ fotg210_writel(fotg210, temp, status_reg);
+ break;
+
+ default:
+ goto error;
+ }
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ break;
+
+ default:
+error:
+ /* "stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return retval;
+}
+
+static void __maybe_unused fotg210_relinquish_port(struct usb_hcd *hcd,
+ int portnum)
+{
+ return;
+}
+
+static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd,
+ int portnum)
+{
+ return 0;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * There are basically three types of memory:
+ * - data used only by the HCD ... kmalloc is fine
+ * - async and periodic schedules, shared by HC and HCD ... these
+ * need to use dma_pool or dma_alloc_coherent
+ * - driver buffers, read/written by HC ... single shot DMA mapped
+ *
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
+ * No memory seen by this driver is pageable.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* Allocate the key transfer structures from the previously allocated pool */
+
+static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
+ struct fotg210_qtd *qtd, dma_addr_t dma)
+{
+ memset(qtd, 0, sizeof(*qtd));
+ qtd->qtd_dma = dma;
+ qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
+ qtd->hw_next = FOTG210_LIST_END(fotg210);
+ qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
+ INIT_LIST_HEAD(&qtd->qtd_list);
+}
+
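+/* allocate a qtd from the qtd dma_pool, initialized halted and unlinked */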
+static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
+ gfp_t flags)
+{
+ struct fotg210_qtd *qtd;
+ dma_addr_t dma;
+
+ qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
+ if (qtd != NULL)
+ fotg210_qtd_init(fotg210, qtd, dma);
+
+ return qtd;
+}
+
+static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210,
+ struct fotg210_qtd *qtd)
+{
+ dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
+}
+
+
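+/* free an idle, unlinked qh along with its dummy qtd */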
+static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ /* clean qtds first, and know this is not linked */
+ if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
+ fotg210_dbg(fotg210, "unused qh not empty!\n");
+ BUG();
+ }
+ if (qh->dummy)
+ fotg210_qtd_free(fotg210, qh->dummy);
+ dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
+ kfree(qh);
+}
+
+static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
+ gfp_t flags)
+{
+ struct fotg210_qh *qh;
+ dma_addr_t dma;
+
+ qh = kzalloc(sizeof(*qh), GFP_ATOMIC);
+ if (!qh)
+ goto done;
+ qh->hw = (struct fotg210_qh_hw *)
+ dma_pool_alloc(fotg210->qh_pool, flags, &dma);
+ if (!qh->hw)
+ goto fail;
+ memset(qh->hw, 0, sizeof(*qh->hw));
+ qh->qh_dma = dma;
+ INIT_LIST_HEAD(&qh->qtd_list);
+
+ /* dummy td enables safe urb queuing */
+ qh->dummy = fotg210_qtd_alloc(fotg210, flags);
+ if (qh->dummy == NULL) {
+ fotg210_dbg(fotg210, "no dummy td\n");
+ goto fail1;
+ }
+done:
+ return qh;
+fail1:
+ dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
+fail:
+ kfree(qh);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+
+static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->async)
+ qh_destroy(fotg210, fotg210->async);
+ fotg210->async = NULL;
+
+ if (fotg210->dummy)
+ qh_destroy(fotg210, fotg210->dummy);
+ fotg210->dummy = NULL;
+
+ /* DMA consistent memory and pools */
+ if (fotg210->qtd_pool)
+ dma_pool_destroy(fotg210->qtd_pool);
+ fotg210->qtd_pool = NULL;
+
+ if (fotg210->qh_pool) {
+ dma_pool_destroy(fotg210->qh_pool);
+ fotg210->qh_pool = NULL;
+ }
+
+ if (fotg210->itd_pool)
+ dma_pool_destroy(fotg210->itd_pool);
+ fotg210->itd_pool = NULL;
+
+ if (fotg210->periodic)
+ dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller,
+ fotg210->periodic_size * sizeof(u32),
+ fotg210->periodic, fotg210->periodic_dma);
+ fotg210->periodic = NULL;
+
+ /* shadow periodic table */
+ kfree(fotg210->pshadow);
+ fotg210->pshadow = NULL;
+}
+
+/* remember to add cleanup code (above) if you add anything here */
+static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags)
+{
+ int i;
+
+ /* QTDs for control/bulk/intr transfers */
+ fotg210->qtd_pool = dma_pool_create("fotg210_qtd",
+ fotg210_to_hcd(fotg210)->self.controller,
+ sizeof(struct fotg210_qtd),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fotg210->qtd_pool)
+ goto fail;
+
+ /* QHs for control/bulk/intr transfers */
+ fotg210->qh_pool = dma_pool_create("fotg210_qh",
+ fotg210_to_hcd(fotg210)->self.controller,
+ sizeof(struct fotg210_qh_hw),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fotg210->qh_pool)
+ goto fail;
+
+ fotg210->async = fotg210_qh_alloc(fotg210, flags);
+ if (!fotg210->async)
+ goto fail;
+
+ /* ITD for high speed ISO transfers */
+ fotg210->itd_pool = dma_pool_create("fotg210_itd",
+ fotg210_to_hcd(fotg210)->self.controller,
+ sizeof(struct fotg210_itd),
+ 64 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fotg210->itd_pool)
+ goto fail;
+
+ /* Hardware periodic table */
+ fotg210->periodic = (__le32 *)
+ dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller,
+ fotg210->periodic_size * sizeof(__le32),
+ &fotg210->periodic_dma, 0);
+ if (fotg210->periodic == NULL)
+ goto fail;
+
+ for (i = 0; i < fotg210->periodic_size; i++)
+ fotg210->periodic[i] = FOTG210_LIST_END(fotg210);
+
+ /* software shadow of hardware table */
+ fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *),
+ flags);
+ if (fotg210->pshadow != NULL)
+ return 0;
+
+fail:
+ fotg210_dbg(fotg210, "couldn't init memory\n");
+ fotg210_mem_cleanup(fotg210);
+ return -ENOMEM;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
+ *
+ * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
+ * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
+ * buffers needed for the larger number). We use one QH per endpoint, queue
+ * multiple urbs (all three types) per endpoint. URBs may need several qtds.
+ *
+ * ISO traffic uses "ISO TD" (itd) records, and (along with
+ * interrupts) needs careful scheduling. Performance improvements can be
+ * an ongoing challenge. That's in "ehci-sched.c".
+ *
+ * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
+ * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
+ * (b) special fields in qh entries or (c) split iso entries. TTs will
+ * buffer low/full speed data so the host collects it at high speed.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* fill a qtd, returning how much of the buffer we were able to queue up */
+
+static int
+qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, dma_addr_t buf,
+ size_t len, int token, int maxpacket)
+{
+ int i, count;
+ u64 addr = buf;
+
+ /* one buffer entry per 4K ... first might be short or unaligned */
+ qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
+ qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32));
+ count = 0x1000 - (buf & 0x0fff); /* rest of that page */
+ if (likely(len < count)) /* ... iff needed */
+ count = len;
+ else {
+ buf += 0x1000;
+ buf &= ~0x0fff;
+
+ /* per-qtd limit: from 16K to 20K (best alignment) */
+ for (i = 1; count < len && i < 5; i++) {
+ addr = buf;
+ qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr);
+ qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210,
+ (u32)(addr >> 32));
+ buf += 0x1000;
+ if ((count + 0x1000) < len)
+ count += 0x1000;
+ else
+ count = len;
+ }
+
+ /* short packets may only terminate transfers */
+ if (count != len)
+ count -= (count % maxpacket);
+ }
+ qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token);
+ qtd->length = count;
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
+ struct fotg210_qtd *qtd)
+{
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ /* writes to an active overlay are unsafe */
+ BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+ hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ hw->hw_alt_next = FOTG210_LIST_END(fotg210);
+
+ /* Except for control endpoints, we make hardware maintain data
+ * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+ * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+ * ever clear it.
+ */
+ if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) {
+ unsigned is_out, epnum;
+
+ is_out = qh->is_out;
+ epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f;
+ if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
+ hw->hw_token &= ~cpu_to_hc32(fotg210, QTD_TOGGLE);
+ usb_settoggle(qh->dev, epnum, is_out, 1);
+ }
+ }
+
+ hw->hw_token &= cpu_to_hc32(fotg210, QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* if it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void
+qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qtd *qtd;
+
+ if (list_empty(&qh->qtd_list))
+ qtd = qh->dummy;
+ else {
+ qtd = list_entry(qh->qtd_list.next,
+ struct fotg210_qtd, qtd_list);
+ /*
+ * first qtd may already be partially processed.
+ * If we come here during unlink, the QH overlay region
+ * might have reference to the just unlinked qtd. The
+ * qtd is updated in qh_completions(). Update the QH
+ * overlay here.
+ */
+ if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) {
+ qh->hw->hw_qtd_next = qtd->hw_next;
+ qtd = NULL;
+ }
+ }
+
+ if (qtd)
+ qh_update(fotg210, qh, qtd);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
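+/* usbcore reports a Clear-TT-Buffer request has finished; if the qh was
+ * left idle with qtds still queued, put it back on the async schedule.
+ */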
+static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh = ep->hcpriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ qh->clearing_tt = 0;
+ if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
+ && fotg210->rh_state == FOTG210_RH_RUNNING)
+ qh_link_async(fotg210, qh);
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh,
+ struct urb *urb, u32 token)
+{
+
+ /* If an async split transaction gets an error or is unlinked,
+ * the TT buffer may be left in an indeterminate state. We
+ * have to clear the TT buffer.
+ *
+ * Note: this routine is never called for Isochronous transfers.
+ */
+ if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
+ struct usb_device *tt = urb->dev->tt->hub;
+ dev_dbg(&tt->dev,
+ "clear tt buffer port %d, a%d ep%d t%08x\n",
+ urb->dev->ttport, urb->dev->devnum,
+ usb_pipeendpoint(urb->pipe), token);
+
+ if (urb->dev->tt->hub !=
+ fotg210_to_hcd(fotg210)->self.root_hub) {
+ if (usb_hub_clear_tt_buffer(urb) == 0)
+ qh->clearing_tt = 1;
+ }
+ }
+}
+
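+/* update urb->actual_length and map qtd token error bits to an urb status */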
+static int qtd_copy_status(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ size_t length,
+ u32 token
+)
+{
+ int status = -EINPROGRESS;
+
+ /* count IN/OUT bytes, not SETUP (even short packets) */
+ if (likely(QTD_PID(token) != 2))
+ urb->actual_length += length - QTD_LENGTH(token);
+
+ /* don't modify error codes */
+ if (unlikely(urb->unlinked))
+ return status;
+
+ /* force cleanup after short read; not always an error */
+ if (unlikely(IS_SHORT_READ(token)))
+ status = -EREMOTEIO;
+
+ /* serious "can't proceed" faults reported by the hardware */
+ if (token & QTD_STS_HALT) {
+ if (token & QTD_STS_BABBLE) {
+ /* FIXME "must" disable babbling device's port too */
+ status = -EOVERFLOW;
+ /* CERR nonzero + halt --> stall */
+ } else if (QTD_CERR(token)) {
+ status = -EPIPE;
+
+ /* In theory, more than one of the following bits can be set
+ * since they are sticky and the transaction is retried.
+ * Which to test first is rather arbitrary.
+ */
+ } else if (token & QTD_STS_MMF) {
+ /* fs/ls interrupt xfer missed the complete-split */
+ status = -EPROTO;
+ } else if (token & QTD_STS_DBE) {
+ status = (QTD_PID(token) == 1) /* IN ? */
+ ? -ENOSR /* hc couldn't read data */
+ : -ECOMM; /* hc couldn't write data */
+ } else if (token & QTD_STS_XACT) {
+ /* timeout, bad CRC, wrong PID, etc */
+ fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n",
+ urb->dev->devpath,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+ status = -EPROTO;
+ } else { /* unknown */
+ status = -EPROTO;
+ }
+
+ fotg210_dbg(fotg210,
+ "dev%d ep%d%s qtd token %08x --> status %d\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ token, status);
+ }
+
+ return status;
+}
+
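+/* update transfer stats and give the urb back to its driver;
+ * fotg210->lock is dropped around usb_hcd_giveback_urb().
+ */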
+static void
+fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb, int status)
+__releases(fotg210->lock)
+__acquires(fotg210->lock)
+{
+ if (likely(urb->hcpriv != NULL)) {
+ struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv;
+
+ /* S-mask in a QH means it's an interrupt urb */
+ if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) {
+
+ /* ... update hc-wide periodic stats (for usbfs) */
+ fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs--;
+ }
+ }
+
+ if (unlikely(urb->unlinked)) {
+ COUNT(fotg210->stats.unlink);
+ } else {
+ /* report non-error and short read status as zero */
+ if (status == -EINPROGRESS || status == -EREMOTEIO)
+ status = 0;
+ COUNT(fotg210->stats.complete);
+ }
+
+#ifdef FOTG210_URB_TRACE
+ fotg210_dbg(fotg210,
+ "%s %s urb %p ep%d%s status %d len %d/%d\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ status,
+ urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+ /* complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+ spin_unlock(&fotg210->lock);
+ usb_hcd_giveback_urb(fotg210_to_hcd(fotg210), urb, status);
+ spin_lock(&fotg210->lock);
+}
+
+static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+/*
+ * Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current. Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned
+qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qtd *last, *end = qh->dummy;
+ struct list_head *entry, *tmp;
+ int last_status;
+ int stopped;
+ unsigned count = 0;
+ u8 state;
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ if (unlikely(list_empty(&qh->qtd_list)))
+ return count;
+
+ /* completions (or tasks on other cpus) must never clobber HALT
+ * till we've gone through and cleaned everything up, even when
+ * they add urbs to this qh's queue or mark them for unlinking.
+ *
+ * NOTE: unlinking expects to be done in queue order.
+ *
+ * It's a bug for qh->qh_state to be anything other than
+ * QH_STATE_IDLE, unless our caller is scan_async() or
+ * scan_intr().
+ */
+ state = qh->qh_state;
+ qh->qh_state = QH_STATE_COMPLETING;
+ stopped = (state == QH_STATE_IDLE);
+
+ rescan:
+ last = NULL;
+ last_status = -EINPROGRESS;
+ qh->needs_rescan = 0;
+
+	/* remove de-activated QTDs from front of queue.
+	 * after faults (including short reads), clean up this urb
+	 * then let the queue advance.
+	 * if the queue is stopped, handle unlinks.
+	 */
+ list_for_each_safe(entry, tmp, &qh->qtd_list) {
+ struct fotg210_qtd *qtd;
+ struct urb *urb;
+ u32 token = 0;
+
+ qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
+ urb = qtd->urb;
+
+ /* clean up any state from previous QTD ...*/
+ if (last) {
+ if (likely(last->urb != urb)) {
+ fotg210_urb_done(fotg210, last->urb,
+ last_status);
+ count++;
+ last_status = -EINPROGRESS;
+ }
+ fotg210_qtd_free(fotg210, last);
+ last = NULL;
+ }
+
+ /* ignore urbs submitted during completions we reported */
+ if (qtd == end)
+ break;
+
+ /* hardware copies qtd out of qh overlay */
+ rmb();
+ token = hc32_to_cpu(fotg210, qtd->hw_token);
+
+ /* always clean up qtds the hc de-activated */
+ retry_xacterr:
+ if ((token & QTD_STS_ACTIVE) == 0) {
+
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ fotg210_dbg(fotg210,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc)
+ ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
+ /* on STALL, error, and short reads this urb must
+ * complete and all its qtds must be recycled.
+ */
+ if ((token & QTD_STS_HALT) != 0) {
+
+ /* retry transaction errors until we
+ * reach the software xacterr limit
+ */
+ if ((token & QTD_STS_XACT) &&
+ QTD_CERR(token) == 0 &&
+ ++qh->xacterrs < QH_XACTERR_MAX &&
+ !urb->unlinked) {
+ fotg210_dbg(fotg210,
+ "detected XactErr len %zu/%zu retry %d\n",
+ qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
+
+ /* reset the token in the qtd and the
+ * qh overlay (which still contains
+ * the qtd) so that we pick up from
+ * where we left off
+ */
+ token &= ~QTD_STS_HALT;
+ token |= QTD_STS_ACTIVE |
+ (FOTG210_TUNE_CERR << 10);
+ qtd->hw_token = cpu_to_hc32(fotg210,
+ token);
+ wmb();
+ hw->hw_token = cpu_to_hc32(fotg210,
+ token);
+ goto retry_xacterr;
+ }
+ stopped = 1;
+
+ /* magic dummy for some short reads; qh won't advance.
+ * that silicon quirk can kick in with this dummy too.
+ *
+ * other short reads won't stop the queue, including
+ * control transfers (status stage handles that) or
+ * most other single-qtd reads ... the queue stops if
+ * URB_SHORT_NOT_OK was set so the driver submitting
+ * the urbs could clean it up.
+ */
+ } else if (IS_SHORT_READ(token)
+ && !(qtd->hw_alt_next
+ & FOTG210_LIST_END(fotg210))) {
+ stopped = 1;
+ }
+
+ /* stop scanning when we reach qtds the hc is using */
+ } else if (likely(!stopped
+ && fotg210->rh_state >= FOTG210_RH_RUNNING)) {
+ break;
+
+ /* scan the whole queue for unlinks whenever it stops */
+ } else {
+ stopped = 1;
+
+ /* cancel everything if we halt, suspend, etc */
+ if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ last_status = -ESHUTDOWN;
+
+ /* this qtd is active; skip it unless a previous qtd
+ * for its urb faulted, or its urb was canceled.
+ */
+ else if (last_status == -EINPROGRESS && !urb->unlinked)
+ continue;
+
+ /* qh unlinked; token in overlay may be most current */
+ if (state == QH_STATE_IDLE
+ && cpu_to_hc32(fotg210, qtd->qtd_dma)
+ == hw->hw_current) {
+ token = hc32_to_cpu(fotg210, hw->hw_token);
+
+ /* An unlink may leave an incomplete
+ * async transaction in the TT buffer.
+ * We have to clear it.
+ */
+ fotg210_clear_tt_buffer(fotg210, qh, urb,
+ token);
+ }
+ }
+
+ /* unless we already know the urb's status, collect qtd status
+ * and update count of bytes transferred. in common short read
+ * cases with only one data qtd (including control transfers),
+ * queue processing won't halt. but with two or more qtds (for
+ * example, with a 32 KB transfer), when the first qtd gets a
+ * short read the second must be removed by hand.
+ */
+ if (last_status == -EINPROGRESS) {
+ last_status = qtd_copy_status(fotg210, urb,
+ qtd->length, token);
+ if (last_status == -EREMOTEIO
+ && (qtd->hw_alt_next
+ & FOTG210_LIST_END(fotg210)))
+ last_status = -EINPROGRESS;
+
+ /* As part of low/full-speed endpoint-halt processing
+ * we must clear the TT buffer (11.17.5).
+ */
+ if (unlikely(last_status != -EINPROGRESS &&
+ last_status != -EREMOTEIO)) {
+				/* The TTs in some hubs malfunction when they
+ * receive this request following a STALL (they
+ * stop sending isochronous packets). Since a
+ * STALL can't leave the TT buffer in a busy
+ * state (if you believe Figures 11-48 - 11-51
+ * in the USB 2.0 spec), we won't clear the TT
+ * buffer in this case. Strictly speaking this
+ * is a violation of the spec.
+ */
+ if (last_status != -EPIPE)
+ fotg210_clear_tt_buffer(fotg210, qh,
+ urb, token);
+ }
+ }
+
+ /* if we're removing something not at the queue head,
+ * patch the hardware queue pointer.
+ */
+ if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+ last = list_entry(qtd->qtd_list.prev,
+ struct fotg210_qtd, qtd_list);
+ last->hw_next = qtd->hw_next;
+ }
+
+ /* remove qtd; it's recycled after possible urb completion */
+ list_del(&qtd->qtd_list);
+ last = qtd;
+
+ /* reinit the xacterr counter for the next qtd */
+ qh->xacterrs = 0;
+ }
+
+ /* last urb's completion might still need calling */
+ if (likely(last != NULL)) {
+ fotg210_urb_done(fotg210, last->urb, last_status);
+ count++;
+ fotg210_qtd_free(fotg210, last);
+ }
+
+ /* Do we need to rescan for URBs dequeued during a giveback? */
+ if (unlikely(qh->needs_rescan)) {
+ /* If the QH is already unlinked, do the rescan now. */
+ if (state == QH_STATE_IDLE)
+ goto rescan;
+
+ /* Otherwise we have to wait until the QH is fully unlinked.
+ * Our caller will start an unlink if qh->needs_rescan is
+ * set. But if an unlink has already started, nothing needs
+ * to be done.
+ */
+ if (state != QH_STATE_LINKED)
+ qh->needs_rescan = 0;
+ }
+
+ /* restore original state; caller must unlink or relink */
+ qh->qh_state = state;
+
+ /* be sure the hardware's done with the qh before refreshing
+ * it after fault cleanup, or recovering from silicon wrongly
+ * overlaying the dummy qtd (which reduces DMA chatter).
+ */
+ if (stopped != 0 || hw->hw_qtd_next == FOTG210_LIST_END(fotg210)) {
+ switch (state) {
+ case QH_STATE_IDLE:
+ qh_refresh(fotg210, qh);
+ break;
+ case QH_STATE_LINKED:
+ /* We won't refresh a QH that's linked (after the HC
+ * stopped the queue). That avoids a race:
+ * - HC reads first part of QH;
+ * - CPU updates that first part and the token;
+ * - HC reads rest of that QH, including token
+ * Result: HC gets an inconsistent image, and then
+ * DMAs to/from the wrong memory (corrupting it).
+ *
+ * That should be rare for interrupt transfers,
+ * except maybe high bandwidth ...
+ */
+
+ /* Tell the caller to start an unlink */
+ qh->needs_rescan = 1;
+ break;
+ /* otherwise, unlink already started */
+ }
+ }
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+/* ... and packet size, for any kind of endpoint descriptor */
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+
+/*
+ * reverse of qh_urb_transaction: free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list
+) {
+ struct list_head *entry, *temp;
+
+ list_for_each_safe(entry, temp, qtd_list) {
+ struct fotg210_qtd *qtd;
+
+ qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
+ list_del(&qtd->qtd_list);
+ fotg210_qtd_free(fotg210, qtd);
+ }
+}
+
+/*
+ * create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *
+qh_urb_transaction(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *head,
+ gfp_t flags
+) {
+ struct fotg210_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, this_sg_len, maxpacket;
+ int is_input;
+ u32 token;
+ int i;
+ struct scatterlist *sg;
+
+ /*
+ * URBs map to sequences of QTDs: one logical transaction
+ */
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ return NULL;
+ list_add_tail(&qtd->qtd_list, head);
+ qtd->urb = urb;
+
+ token = QTD_STS_ACTIVE;
+ token |= (FOTG210_TUNE_CERR << 10);
+ /* for split transactions, SplitXState initialized to zero */
+
+ len = urb->transfer_buffer_length;
+ is_input = usb_pipein(urb->pipe);
+ if (usb_pipecontrol(urb->pipe)) {
+ /* SETUP pid */
+ qtd_fill(fotg210, qtd, urb->setup_dma,
+ sizeof(struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
+
+ /* ... and always at least one more pid */
+ token ^= QTD_TOGGLE;
+ qtd_prev = qtd;
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* for zero length DATA stages, STATUS is always IN */
+ if (len == 0)
+ token |= (1 /* "in" */ << 8);
+ }
+
+ /*
+ * data transfer stage: buffer setup
+ */
+ i = urb->num_mapped_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg;
+ buf = sg_dma_address(sg);
+
+ /* urb->transfer_buffer_length may be smaller than the
+ * size of the scatterlist (or vice versa)
+ */
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ } else {
+ sg = NULL;
+ buf = urb->transfer_dma;
+ this_sg_len = len;
+ }
+
+ if (is_input)
+ token |= (1 /* "in" */ << 8);
+ /* else it's already initted to "out" pid (0 << 8) */
+
+ maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+ /*
+ * buffer gets wrapped in one or more qtds;
+ * last one may be "short" (including zero len)
+ * and may serve as a control status ack
+ */
+ for (;;) {
+ int this_qtd_len;
+
+ this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token,
+ maxpacket);
+ this_sg_len -= this_qtd_len;
+ len -= this_qtd_len;
+ buf += this_qtd_len;
+
+ /*
+ * short reads advance to a "magic" dummy instead of the next
+ * qtd ... that forces the queue to stop, for manual cleanup.
+ * (this will usually be overridden later.)
+ */
+ if (is_input)
+ qtd->hw_alt_next = fotg210->async->hw->hw_alt_next;
+
+ /* qh makes control packets use qtd toggle; maybe switch it */
+ if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+ token ^= QTD_TOGGLE;
+
+ if (likely(this_sg_len <= 0)) {
+ if (--i <= 0 || len <= 0)
+ break;
+ sg = sg_next(sg);
+ buf = sg_dma_address(sg);
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ }
+
+ qtd_prev = qtd;
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+ }
+
+ /*
+ * unless the caller requires manual cleanup after short reads,
+ * have the alt_next mechanism keep the queue running after the
+ * last data qtd (the only one, for control and most other cases).
+ */
+ if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+ || usb_pipecontrol(urb->pipe)))
+ qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
+
+ /*
+ * control requests may need a terminating data "status" ack;
+ * other OUT ones may need a terminating short packet
+ * (zero length).
+ */
+ if (likely(urb->transfer_buffer_length != 0)) {
+ int one_more = 0;
+
+ if (usb_pipecontrol(urb->pipe)) {
+ one_more = 1;
+ token ^= 0x0100; /* "in" <--> "out" */
+ token |= QTD_TOGGLE; /* force DATA1 */
+ } else if (usb_pipeout(urb->pipe)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && !(urb->transfer_buffer_length % maxpacket)) {
+ one_more = 1;
+ }
+ if (one_more) {
+ qtd_prev = qtd;
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* never any data in such packets */
+ qtd_fill(fotg210, qtd, 0, 0, token, 0);
+ }
+ }
+
+ /* by default, enable interrupt on urb completion */
+ if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
+ qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC);
+ return head;
+
+cleanup:
+ qtd_list_free(fotg210, urb, head);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+/*
+ * Would be best to create all qh's from config descriptors,
+ * when each interface/altsetting is established. Unlink
+ * any previous qh and cancel its urbs first; endpoints are
+ * implicitly reset then (data toggle too).
+ * That'd mean updating how usbcore talks to HCDs. (2.7?)
+ */
+
+
+/*
+ * Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled. For highspeed, that's
+ * just one microframe in the s-mask. For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct fotg210_qh *
+qh_make(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ gfp_t flags
+) {
+ struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
+ u32 info1 = 0, info2 = 0;
+ int is_input, type;
+ int maxp = 0;
+ struct usb_tt *tt = urb->dev->tt;
+ struct fotg210_qh_hw *hw;
+
+ if (!qh)
+ return qh;
+
+ /*
+ * init endpoint/device data for this QH
+ */
+ info1 |= usb_pipeendpoint(urb->pipe) << 8;
+ info1 |= usb_pipedevice(urb->pipe) << 0;
+
+ is_input = usb_pipein(urb->pipe);
+ type = usb_pipetype(urb->pipe);
+ maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+
+ /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
+ * acts like up to 3KB, but is built from smaller packets.
+ */
+ if (max_packet(maxp) > 1024) {
+ fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
+ max_packet(maxp));
+ goto done;
+ }
+
+ /* Compute interrupt scheduling parameters just once, and save.
+ * - allowing for high bandwidth, how many nsec/uframe are used?
+ * - split transactions need a second CSPLIT uframe; same question
+ * - splits also need a schedule gap (for full/low speed I/O)
+ * - qh has a polling interval
+ *
+ * For control/bulk requests, the HC or TT handles these.
+ */
+ if (type == PIPE_INTERRUPT) {
+ qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ is_input, 0,
+ hb_mult(maxp) * max_packet(maxp)));
+ qh->start = NO_FRAME;
+
+ if (urb->dev->speed == USB_SPEED_HIGH) {
+ qh->c_usecs = 0;
+ qh->gap_uf = 0;
+
+ qh->period = urb->interval >> 3;
+ if (qh->period == 0 && urb->interval != 1) {
+ /* NOTE interval 2 or 4 uframes could work.
+ * But interval 1 scheduling is simpler, and
+ * includes high bandwidth.
+ */
+ urb->interval = 1;
+ } else if (qh->period > fotg210->periodic_size) {
+ qh->period = fotg210->periodic_size;
+ urb->interval = qh->period << 3;
+ }
+ } else {
+ int think_time;
+
+ /* gap is f(FS/LS transfer times) */
+ qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
+ is_input, 0, maxp) / (125 * 1000);
+
+ /* FIXME this just approximates SPLIT/CSPLIT times */
+ if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
+ qh->c_usecs = qh->usecs + HS_USECS(0);
+ qh->usecs = HS_USECS(1);
+ } else { /* SPLIT+DATA, gap, CSPLIT */
+ qh->usecs += HS_USECS(1);
+ qh->c_usecs = HS_USECS(0);
+ }
+
+ think_time = tt ? tt->think_time : 0;
+ qh->tt_usecs = NS_TO_US(think_time +
+ usb_calc_bus_time(urb->dev->speed,
+ is_input, 0, max_packet(maxp)));
+ qh->period = urb->interval;
+ if (qh->period > fotg210->periodic_size) {
+ qh->period = fotg210->periodic_size;
+ urb->interval = qh->period;
+ }
+ }
+ }
+
+ /* support for tt scheduling, and access to toggles */
+ qh->dev = urb->dev;
+
+ /* using TT? */
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ info1 |= QH_LOW_SPEED;
+ /* FALL THROUGH */
+
+ case USB_SPEED_FULL:
+ /* EPS 0 means "full" */
+ if (type != PIPE_INTERRUPT)
+ info1 |= (FOTG210_TUNE_RL_TT << 28);
+ if (type == PIPE_CONTROL) {
+ info1 |= QH_CONTROL_EP; /* for TT */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ }
+ info1 |= maxp << 16;
+
+ info2 |= (FOTG210_TUNE_MULT_TT << 30);
+
+ /* Some Freescale processors have an erratum in which the
+ * port number in the queue head was 0..N-1 instead of 1..N.
+ */
+ if (fotg210_has_fsl_portno_bug(fotg210))
+ info2 |= (urb->dev->ttport-1) << 23;
+ else
+ info2 |= urb->dev->ttport << 23;
+
+ /* set the address of the TT; for TDI's integrated
+ * root hub tt, leave it zeroed.
+ */
+ if (tt && tt->hub != fotg210_to_hcd(fotg210)->self.root_hub)
+ info2 |= tt->hub->devnum << 16;
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+ break;
+
+ case USB_SPEED_HIGH: /* no TT involved */
+ info1 |= QH_HIGH_SPEED;
+ if (type == PIPE_CONTROL) {
+ info1 |= (FOTG210_TUNE_RL_HS << 28);
+ info1 |= 64 << 16; /* usb2 fixed maxpacket */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ info2 |= (FOTG210_TUNE_MULT_HS << 30);
+ } else if (type == PIPE_BULK) {
+ info1 |= (FOTG210_TUNE_RL_HS << 28);
+ /* The USB spec says that high speed bulk endpoints
+ * always use 512 byte maxpacket. But some device
+ * vendors decided to ignore that, and MSFT is happy
+ * to help them do so. So now people expect to use
+ * such nonconformant devices with Linux too; sigh.
+ */
+ info1 |= max_packet(maxp) << 16;
+ info2 |= (FOTG210_TUNE_MULT_HS << 30);
+ } else { /* PIPE_INTERRUPT */
+ info1 |= max_packet(maxp) << 16;
+ info2 |= hb_mult(maxp) << 30;
+ }
+ break;
+ default:
+ fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev,
+ urb->dev->speed);
+done:
+ qh_destroy(fotg210, qh);
+ return NULL;
+ }
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+ /* init as live, toggle clear, advance to dummy */
+ qh->qh_state = QH_STATE_IDLE;
+ hw = qh->hw;
+ hw->hw_info1 = cpu_to_hc32(fotg210, info1);
+ hw->hw_info2 = cpu_to_hc32(fotg210, info2);
+ qh->is_out = !is_input;
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
+ qh_refresh(fotg210, qh);
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_async(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->async_count++)
+ return;
+
+ /* Stop waiting to turn off the async schedule */
+ fotg210->enabled_hrtimer_events &= ~BIT(FOTG210_HRTIMER_DISABLE_ASYNC);
+
+ /* Don't start the schedule until ASS is 0 */
+ fotg210_poll_ASS(fotg210);
+ turn_on_io_watchdog(fotg210);
+}
+
+static void disable_async(struct fotg210_hcd *fotg210)
+{
+ if (--fotg210->async_count)
+ return;
+
+ /* The async schedule and async_unlink list are supposed to be empty */
+ WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink);
+
+ /* Don't turn off the schedule until ASS is 1 */
+ fotg210_poll_ASS(fotg210);
+}
+
+/* move qh (and its qtds) onto async queue; maybe enable queue. */
+
+static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ __hc32 dma = QH_NEXT(fotg210, qh->qh_dma);
+ struct fotg210_qh *head;
+
+ /* Don't link a QH if there's a Clear-TT-Buffer pending */
+ if (unlikely(qh->clearing_tt))
+ return;
+
+ WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
+ /* clear halt and/or toggle; and maybe recover from silicon quirk */
+ qh_refresh(fotg210, qh);
+
+ /* splice right after start */
+ head = fotg210->async;
+ qh->qh_next = head->qh_next;
+ qh->hw->hw_next = head->hw->hw_next;
+ wmb();
+
+ head->qh_next.qh = qh;
+ head->hw->hw_next = dma;
+
+ qh->xacterrs = 0;
+ qh->qh_state = QH_STATE_LINKED;
+ /* qtd completions reported later by interrupt */
+
+ enable_async(fotg210);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns null if it can't allocate a QH it needs to.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct fotg210_qh *qh_append_tds(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ int epnum,
+ void **ptr
+)
+{
+ struct fotg210_qh *qh = NULL;
+ __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f);
+
+ qh = (struct fotg210_qh *) *ptr;
+ if (unlikely(qh == NULL)) {
+ /* can't sleep here, we have fotg210->lock... */
+ qh = qh_make(fotg210, urb, GFP_ATOMIC);
+ *ptr = qh;
+ }
+ if (likely(qh != NULL)) {
+ struct fotg210_qtd *qtd;
+
+ if (unlikely(list_empty(qtd_list)))
+ qtd = NULL;
+ else
+ qtd = list_entry(qtd_list->next, struct fotg210_qtd,
+ qtd_list);
+
+ /* control qh may need patching ... */
+ if (unlikely(epnum == 0)) {
+ /* usb_reset_device() briefly reverts to address 0 */
+ if (usb_pipedevice(urb->pipe) == 0)
+ qh->hw->hw_info1 &= ~qh_addr_mask;
+ }
+
+ /* just one way to queue requests: swap with the dummy qtd.
+ * only hc or qh_refresh() ever modify the overlay.
+ */
+ if (likely(qtd != NULL)) {
+ struct fotg210_qtd *dummy;
+ dma_addr_t dma;
+ __hc32 token;
+
+ /* to avoid racing the HC, use the dummy td instead of
+ * the first td of our list (becomes new dummy). both
+ * tds stay deactivated until we're done, when the
+ * HC is allowed to fetch the old dummy (4.10.2).
+ */
+ token = qtd->hw_token;
+ qtd->hw_token = HALT_BIT(fotg210);
+
+ dummy = qh->dummy;
+
+ dma = dummy->qtd_dma;
+ *dummy = *qtd;
+ dummy->qtd_dma = dma;
+
+ list_del(&qtd->qtd_list);
+ list_add(&dummy->qtd_list, qtd_list);
+ list_splice_tail(qtd_list, &qh->qtd_list);
+
+ fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma);
+ qh->dummy = qtd;
+
+ /* hc must see the new dummy at list end */
+ dma = qtd->qtd_dma;
+ qtd = list_entry(qh->qtd_list.prev,
+ struct fotg210_qtd, qtd_list);
+ qtd->hw_next = QTD_NEXT(fotg210, dma);
+
+ /* let the hc process these next qtds */
+ wmb();
+ dummy->hw_token = token;
+
+ urb->hcpriv = qh;
+ }
+ }
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
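+/* queue qtds for a control or bulk urb, linking its qh into the async
+ * schedule when the qh is idle
+ */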
+static int
+submit_async(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ int epnum;
+ unsigned long flags;
+ struct fotg210_qh *qh = NULL;
+ int rc;
+
+ epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef FOTG210_URB_TRACE
+ {
+ struct fotg210_qtd *qtd;
+ qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list);
+ fotg210_dbg(fotg210,
+ "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
+ }
+#endif
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+ rc = -ESHUTDOWN;
+ goto done;
+ }
+ rc = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+ if (unlikely(rc))
+ goto done;
+
+ qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ if (unlikely(qh == NULL)) {
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ /* Control/bulk operations through TTs don't need scheduling,
+ * the HC and TT handle it when the TT has a buffer ready.
+ */
+ if (likely(qh->qh_state == QH_STATE_IDLE))
+ qh_link_async(fotg210, qh);
+ done:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ if (unlikely(qh == NULL))
+ qtd_list_free(fotg210, urb, qtd_list);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
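+/* take a qh off the async schedule and queue it for the next IAA cycle */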
+static void single_unlink_async(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ struct fotg210_qh *prev;
+
+ /* Add to the end of the list of QHs waiting for the next IAAD */
+ qh->qh_state = QH_STATE_UNLINK;
+ if (fotg210->async_unlink)
+ fotg210->async_unlink_last->unlink_next = qh;
+ else
+ fotg210->async_unlink = qh;
+ fotg210->async_unlink_last = qh;
+
+ /* Unlink it from the schedule */
+ prev = fotg210->async;
+ while (prev->qh_next.qh != qh)
+ prev = prev->qh_next.qh;
+
+ prev->hw->hw_next = qh->hw->hw_next;
+ prev->qh_next = qh->qh_next;
+ if (fotg210->qh_scan_next == qh)
+ fotg210->qh_scan_next = qh->qh_next.qh;
+}
+
+static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
+{
+ /*
+ * Do nothing if an IAA cycle is already running or
+ * if one will be started shortly.
+ */
+ if (fotg210->async_iaa || fotg210->async_unlinking)
+ return;
+
+ /* Do all the waiting QHs at once */
+ fotg210->async_iaa = fotg210->async_unlink;
+ fotg210->async_unlink = NULL;
+
+ /* If the controller isn't running, we don't have to wait for it */
+ if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) {
+ if (!nested) /* Avoid recursion */
+ end_unlink_async(fotg210);
+
+ /* Otherwise start a new IAA cycle */
+ } else if (likely(fotg210->rh_state == FOTG210_RH_RUNNING)) {
+ /* Make sure the unlinks are all visible to the hardware */
+ wmb();
+
+ fotg210_writel(fotg210, fotg210->command | CMD_IAAD,
+ &fotg210->regs->command);
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG,
+ true);
+ }
+}
+
+/* the async qhs for the qtds being unlinked are now gone from the HC */
+
+static void end_unlink_async(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh;
+
+ /* Process the idle QHs */
+ restart:
+ fotg210->async_unlinking = true;
+ while (fotg210->async_iaa) {
+ qh = fotg210->async_iaa;
+ fotg210->async_iaa = qh->unlink_next;
+ qh->unlink_next = NULL;
+
+ qh->qh_state = QH_STATE_IDLE;
+ qh->qh_next.qh = NULL;
+
+ qh_completions(fotg210, qh);
+ if (!list_empty(&qh->qtd_list) &&
+ fotg210->rh_state == FOTG210_RH_RUNNING)
+ qh_link_async(fotg210, qh);
+ disable_async(fotg210);
+ }
+ fotg210->async_unlinking = false;
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fotg210->async_unlink) {
+ start_iaa_cycle(fotg210, true);
+ if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING))
+ goto restart;
+ }
+}
+
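+/* unlink async qhs that have stayed empty for a full unlink cycle; qhs
+ * that emptied more recently are deferred to a later pass
+ */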
+static void unlink_empty_async(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh, *next;
+ bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
+ bool check_unlinks_later = false;
+
+ /* Unlink all the async QHs that have been empty for a timer cycle */
+ next = fotg210->async->qh_next.qh;
+ while (next) {
+ qh = next;
+ next = qh->qh_next.qh;
+
+ if (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED) {
+ if (!stopped && qh->unlink_cycle ==
+ fotg210->async_unlink_cycle)
+ check_unlinks_later = true;
+ else
+ single_unlink_async(fotg210, qh);
+ }
+ }
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fotg210->async_unlink)
+ start_iaa_cycle(fotg210, false);
+
+ /* QHs that haven't been empty for long enough will be handled later */
+ if (check_unlinks_later) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS,
+ true);
+ ++fotg210->async_unlink_cycle;
+ }
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own fotg210->lock */
+
+static void start_unlink_async(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ /*
+ * If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ single_unlink_async(fotg210, qh);
+ start_iaa_cycle(fotg210, false);
+}
+
+/*-------------------------------------------------------------------------*/
+
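+/* process completions for every qh on the async schedule, flagging qhs
+ * that went idle so they can be unlinked later
+ */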
+static void scan_async(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh;
+ bool check_unlinks_later = false;
+
+ fotg210->qh_scan_next = fotg210->async->qh_next.qh;
+ while (fotg210->qh_scan_next) {
+ qh = fotg210->qh_scan_next;
+ fotg210->qh_scan_next = qh->qh_next.qh;
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fotg210->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fotg210->qh_scan_next is adjusted
+ * in single_unlink_async().
+ */
+ temp = qh_completions(fotg210, qh);
+ if (qh->needs_rescan) {
+ start_unlink_async(fotg210, qh);
+ } else if (list_empty(&qh->qtd_list)
+ && qh->qh_state == QH_STATE_LINKED) {
+ qh->unlink_cycle = fotg210->async_unlink_cycle;
+ check_unlinks_later = true;
+ } else if (temp != 0)
+ goto rescan;
+ }
+ }
+
+ /*
+ * Unlink empty entries, reducing DMA usage as well
+	 * as HCD schedule-scanning costs. Delay for any qh
+	 * we just scanned; it's not unusual for it to become
+	 * busy again soon.
+ */
+ if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING &&
+ !(fotg210->enabled_hrtimer_events &
+ BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) {
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_ASYNC_UNLINKS, true);
+ ++fotg210->async_unlink_cycle;
+ }
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI scheduled transaction support: interrupt, iso, split iso
+ * These are called "periodic" transactions in the EHCI spec.
+ *
+ * Note that for interrupt transfers, the QH/QTD manipulation is shared
+ * with the "asynchronous" transaction support (control/bulk transfers).
+ * The only real difference is in how interrupt transfers are scheduled.
+ *
+ * For ISO, we make an "iso_stream" head to serve the same role as a QH.
+ * It keeps track of every ITD (or SITD) that's linked, and holds enough
+ * pre-calculated schedule data to make appending to the queue be quick.
+ */
+
+static int fotg210_get_frame(struct usb_hcd *hcd);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd
+ * @tag: hardware tag for type of this record
+ */
+static union fotg210_shadow *
+periodic_next_shadow(struct fotg210_hcd *fotg210,
+ union fotg210_shadow *periodic, __hc32 tag)
+{
+ switch (hc32_to_cpu(fotg210, tag)) {
+ case Q_TYPE_QH:
+ return &periodic->qh->qh_next;
+ case Q_TYPE_FSTN:
+ return &periodic->fstn->fstn_next;
+ default:
+ return &periodic->itd->itd_next;
+ }
+}
+
+static __hc32 *
+shadow_next_periodic(struct fotg210_hcd *fotg210,
+ union fotg210_shadow *periodic, __hc32 tag)
+{
+ switch (hc32_to_cpu(fotg210, tag)) {
+ /* our fotg210_shadow.qh is actually software part */
+ case Q_TYPE_QH:
+ return &periodic->qh->hw->hw_next;
+ /* others are hw parts */
+ default:
+ return periodic->hw_next;
+ }
+}
+
+/* caller must hold fotg210->lock */
+static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame,
+ void *ptr)
+{
+ union fotg210_shadow *prev_p = &fotg210->pshadow[frame];
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow here = *prev_p;
+
+ /* find predecessor of "ptr"; hw and shadow lists are in sync */
+ while (here.ptr && here.ptr != ptr) {
+ prev_p = periodic_next_shadow(fotg210, prev_p,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+ hw_p = shadow_next_periodic(fotg210, &here,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+ here = *prev_p;
+ }
+ /* an interrupt entry (at list end) could have been shared */
+ if (!here.ptr)
+ return;
+
+ /* update shadow and hardware lists ... the old "next" pointers
+ * from ptr may still be in use, the caller updates them.
+ */
+ *prev_p = *periodic_next_shadow(fotg210, &here,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+
+ *hw_p = *shadow_next_periodic(fotg210, &here,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+}
+
+/* how many of the uframe's 125 usecs are allocated? */
+static unsigned short
+periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe)
+{
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow *q = &fotg210->pshadow[frame];
+ unsigned usecs = 0;
+ struct fotg210_qh_hw *hw;
+
+ while (q->ptr) {
+ switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) {
+ case Q_TYPE_QH:
+ hw = q->qh->hw;
+ /* is it in the S-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fotg210, 1 << uframe))
+ usecs += q->qh->usecs;
+ /* ... or C-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fotg210,
+ 1 << (8 + uframe)))
+ usecs += q->qh->c_usecs;
+ hw_p = &hw->hw_next;
+ q = &q->qh->qh_next;
+ break;
+ /* case Q_TYPE_FSTN: */
+ default:
+ /* for "save place" FSTNs, count the relevant INTR
+ * bandwidth from the previous frame
+ */
+ if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210))
+ fotg210_dbg(fotg210, "ignoring FSTN cost ...\n");
+
+ hw_p = &q->fstn->hw_next;
+ q = &q->fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ if (q->itd->hw_transaction[uframe])
+ usecs += q->itd->stream->usecs;
+ hw_p = &q->itd->hw_next;
+ q = &q->itd->itd_next;
+ break;
+ }
+ }
+ if (usecs > fotg210->uframe_periodic_max)
+ fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
+ frame * 8 + uframe, usecs);
+ return usecs;
+}
+
+/*-------------------------------------------------------------------------*/
+
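+/* nonzero iff both devices sit behind the same transaction translator
+ * (and the same port, when the hub has multiple TTs)
+ */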
+static int same_tt(struct usb_device *dev1, struct usb_device *dev2)
+{
+ if (!dev1->tt || !dev2->tt)
+ return 0;
+ if (dev1->tt != dev2->tt)
+ return 0;
+ if (dev1->tt->multi)
+ return dev1->ttport == dev2->ttport;
+ else
+ return 1;
+}
+
+/* return true iff the device's transaction translator is available
+ * for a periodic transfer starting at the specified frame, using
+ * all the uframes in the mask.
+ */
+static int tt_no_collision(
+ struct fotg210_hcd *fotg210,
+ unsigned period,
+ struct usb_device *dev,
+ unsigned frame,
+ u32 uf_mask
+)
+{
+ if (period == 0) /* error */
+ return 0;
+
+ /* note bandwidth wastage: split never follows csplit
+ * (different dev or endpoint) until the next uframe.
+ * calling convention doesn't make that distinction.
+ */
+ for (; frame < fotg210->periodic_size; frame += period) {
+ union fotg210_shadow here;
+ __hc32 type;
+ struct fotg210_qh_hw *hw;
+
+ here = fotg210->pshadow[frame];
+ type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);
+ while (here.ptr) {
+ switch (hc32_to_cpu(fotg210, type)) {
+ case Q_TYPE_ITD:
+ type = Q_NEXT_TYPE(fotg210, here.itd->hw_next);
+ here = here.itd->itd_next;
+ continue;
+ case Q_TYPE_QH:
+ hw = here.qh->hw;
+ if (same_tt(dev, here.qh->dev)) {
+ u32 mask;
+
+ mask = hc32_to_cpu(fotg210,
+ hw->hw_info2);
+ /* "knows" no gap is needed */
+ mask |= mask >> 8;
+ if (mask & uf_mask)
+ break;
+ }
+ type = Q_NEXT_TYPE(fotg210, hw->hw_next);
+ here = here.qh->qh_next;
+ continue;
+ /* case Q_TYPE_FSTN: */
+ default:
+ fotg210_dbg(fotg210,
+ "periodic frame %d bogus type %d\n",
+ frame, type);
+ }
+
+ /* collision or error */
+ return 0;
+ }
+ }
+
+ /* no collision */
+ return 1;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_periodic(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->periodic_count++)
+ return;
+
+ /* Stop waiting to turn off the periodic schedule */
+ fotg210->enabled_hrtimer_events &=
+ ~BIT(FOTG210_HRTIMER_DISABLE_PERIODIC);
+
+ /* Don't start the schedule until PSS is 0 */
+ fotg210_poll_PSS(fotg210);
+ turn_on_io_watchdog(fotg210);
+}
+
+static void disable_periodic(struct fotg210_hcd *fotg210)
+{
+ if (--fotg210->periodic_count)
+ return;
+
+ /* Don't turn off the schedule until PSS is 1 */
+ fotg210_poll_PSS(fotg210);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; fotg210 0.96+)
+ */
+static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ unsigned i;
+ unsigned period = qh->period;
+
+ dev_dbg(&qh->dev->dev,
+ "link qh%d-%04x/%p start %d [%d/%d us]\n",
+ period, hc32_to_cpup(fotg210, &qh->hw->hw_info2)
+ & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* high bandwidth, or otherwise every microframe */
+ if (period == 0)
+ period = 1;
+
+ for (i = qh->start; i < fotg210->periodic_size; i += period) {
+ union fotg210_shadow *prev = &fotg210->pshadow[i];
+ __hc32 *hw_p = &fotg210->periodic[i];
+ union fotg210_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip the iso nodes at list head */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fotg210, prev, type);
+ hw_p = shadow_next_periodic(fotg210, &here, type);
+ here = *prev;
+ }
+
+ /* sorting each branch by period (slow-->fast)
+ * enables sharing interior tree nodes
+ */
+ while (here.ptr && qh != here.qh) {
+ if (qh->period > here.qh->period)
+ break;
+ prev = &here.qh->qh_next;
+ hw_p = &here.qh->hw->hw_next;
+ here = *prev;
+ }
+ /* link in this qh, unless some earlier pass did that */
+ if (qh != here.qh) {
+ qh->qh_next = here;
+ if (here.qh)
+ qh->hw->hw_next = *hw_p;
+ wmb();
+ prev->qh = qh;
+ *hw_p = QH_NEXT(fotg210, qh->qh_dma);
+ }
+ }
+ qh->qh_state = QH_STATE_LINKED;
+ qh->xacterrs = 0;
+
+ /* update per-qh bandwidth for usbfs */
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ list_add(&qh->intr_node, &fotg210->intr_qh_list);
+
+ /* maybe enable periodic schedule processing */
+ ++fotg210->intr_count;
+ enable_periodic(fotg210);
+}
+
+static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ unsigned i;
+ unsigned period;
+
+ /*
+ * If qh is for a low/full-speed device, simply unlinking it
+ * could interfere with an ongoing split transaction. To unlink
+ * it safely would require setting the QH_INACTIVATE bit and
+ * waiting at least one frame, as described in EHCI 4.12.2.5.
+ *
+ * We won't bother with any of this. Instead, we assume that the
+ * only reason for unlinking an interrupt QH while the current URB
+ * is still active is to dequeue all the URBs (flush the whole
+ * endpoint queue).
+ *
+ * If rebalancing the periodic schedule is ever implemented, this
+ * approach will no longer be valid.
+ */
+
+ /* high bandwidth, or otherwise part of every microframe */
+ period = qh->period;
+ if (!period)
+ period = 1;
+
+ for (i = qh->start; i < fotg210->periodic_size; i += period)
+ periodic_unlink(fotg210, i, qh);
+
+ /* update per-qh bandwidth for usbfs */
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ dev_dbg(&qh->dev->dev,
+ "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ qh->period,
+ hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
+ (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* qh->qh_next still "live" to HC */
+ qh->qh_state = QH_STATE_UNLINK;
+ qh->qh_next.ptr = NULL;
+
+ if (fotg210->qh_scan_next == qh)
+ fotg210->qh_scan_next = list_entry(qh->intr_node.next,
+ struct fotg210_qh, intr_node);
+ list_del(&qh->intr_node);
+}
+
+static void start_unlink_intr(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ /* If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ qh_unlink_periodic(fotg210, qh);
+
+ /* Make sure the unlinks are visible before starting the timer */
+ wmb();
+
+ /*
+ * The EHCI spec doesn't say how long it takes the controller to
+ * stop accessing an unlinked interrupt QH. The timer delay is
+ * 9 uframes; presumably that will be long enough.
+ */
+ qh->unlink_cycle = fotg210->intr_unlink_cycle;
+
+ /* New entries go at the end of the intr_unlink list */
+ if (fotg210->intr_unlink)
+ fotg210->intr_unlink_last->unlink_next = qh;
+ else
+ fotg210->intr_unlink = qh;
+ fotg210->intr_unlink_last = qh;
+
+ if (fotg210->intr_unlinking)
+ ; /* Avoid recursive calls */
+ else if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ fotg210_handle_intr_unlinks(fotg210);
+ else if (fotg210->intr_unlink == qh) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
+ true);
+ ++fotg210->intr_unlink_cycle;
+ }
+}
+
+static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qh_hw *hw = qh->hw;
+ int rc;
+
+ qh->qh_state = QH_STATE_IDLE;
+ hw->hw_next = FOTG210_LIST_END(fotg210);
+
+ qh_completions(fotg210, qh);
+
+ /* reschedule QH iff another request is queued */
+ if (!list_empty(&qh->qtd_list) &&
+ fotg210->rh_state == FOTG210_RH_RUNNING) {
+ rc = qh_schedule(fotg210, qh);
+
+ /* An error here likely indicates handshake failure
+ * or no space left in the schedule. Neither fault
+ * should happen often ...
+ *
+ * FIXME kill the now-dysfunctional queued urbs
+ */
+ if (rc != 0)
+ fotg210_err(fotg210, "can't reschedule qh %p, err %d\n",
+ qh, rc);
+ }
+
+ /* maybe turn off periodic schedule */
+ --fotg210->intr_count;
+ disable_periodic(fotg210);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int check_period(
+ struct fotg210_hcd *fotg210,
+ unsigned frame,
+ unsigned uframe,
+ unsigned period,
+ unsigned usecs
+) {
+ int claimed;
+
+ /* complete split running into next frame?
+ * given FSTN support, we could sometimes check...
+ */
+ if (uframe >= 8)
+ return 0;
+
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = fotg210->uframe_periodic_max - usecs;
+
+ /* we "know" 2 and 4 uframe intervals were rejected; so
+ * for period 0, check _every_ microframe in the schedule.
+ */
+ if (unlikely(period == 0)) {
+ do {
+ for (uframe = 0; uframe < 7; uframe++) {
+ claimed = periodic_usecs(fotg210, frame,
+ uframe);
+ if (claimed > usecs)
+ return 0;
+ }
+ } while ((frame += 1) < fotg210->periodic_size);
+
+ /* just check the specified uframe, at that period */
+ } else {
+ do {
+ claimed = periodic_usecs(fotg210, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ } while ((frame += period) < fotg210->periodic_size);
+ }
+
+ /* success! */
+ return 1;
+}
+
+static int check_intr_schedule(
+ struct fotg210_hcd *fotg210,
+ unsigned frame,
+ unsigned uframe,
+ const struct fotg210_qh *qh,
+ __hc32 *c_maskp
+)
+{
+ int retval = -ENOSPC;
+ u8 mask = 0;
+
+ if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ goto done;
+
+ if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs))
+ goto done;
+ if (!qh->c_usecs) {
+ retval = 0;
+ *c_maskp = 0;
+ goto done;
+ }
+
+ /* Make sure this tt's buffer is also available for CSPLITs.
+ * We pessimize a bit; probably the typical full speed case
+ * doesn't need the second CSPLIT.
+ *
+ * NOTE: both SPLIT and CSPLIT could be checked in just
+ * one smart pass...
+ */
+ mask = 0x03 << (uframe + qh->gap_uf);
+ *c_maskp = cpu_to_hc32(fotg210, mask << 8);
+
+ mask |= 1 << uframe;
+ if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) {
+ if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1,
+ qh->period, qh->c_usecs))
+ goto done;
+ if (!check_period(fotg210, frame, uframe + qh->gap_uf,
+ qh->period, qh->c_usecs))
+ goto done;
+ retval = 0;
+ }
+done:
+ return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ int status;
+ unsigned uframe;
+ __hc32 c_mask;
+ unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ qh_refresh(fotg210, qh);
+ hw->hw_next = FOTG210_LIST_END(fotg210);
+ frame = qh->start;
+
+ /* reuse the previous schedule slots, if we can */
+ if (frame < qh->period) {
+ uframe = ffs(hc32_to_cpup(fotg210, &hw->hw_info2) & QH_SMASK);
+ status = check_intr_schedule(fotg210, frame, --uframe,
+ qh, &c_mask);
+ } else {
+ uframe = 0;
+ c_mask = 0;
+ status = -ENOSPC;
+ }
+
+ /* else scan the schedule to find a group of slots such that all
+ * uframes have enough periodic bandwidth available.
+ */
+ if (status) {
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->period) {
+ int i;
+
+ for (i = qh->period; status && i > 0; --i) {
+ frame = ++fotg210->random_frame % qh->period;
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule(fotg210,
+ frame, uframe, qh,
+ &c_mask);
+ if (status == 0)
+ break;
+ }
+ }
+
+ /* qh->period == 0 means every uframe */
+ } else {
+ frame = 0;
+ status = check_intr_schedule(fotg210, 0, 0, qh,
+ &c_mask);
+ }
+ if (status)
+ goto done;
+ qh->start = frame;
+
+ /* reset S-frame and (maybe) C-frame masks */
+ hw->hw_info2 &= cpu_to_hc32(fotg210, ~(QH_CMASK | QH_SMASK));
+ hw->hw_info2 |= qh->period
+ ? cpu_to_hc32(fotg210, 1 << uframe)
+ : cpu_to_hc32(fotg210, QH_SMASK);
+ hw->hw_info2 |= c_mask;
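+		/*
+		 * The S-mask written above holds a single start-uframe bit
+		 * when the period is nonzero; period 0 sets the whole
+		 * QH_SMASK, so the QH is polled in every microframe.
+		 */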
+ } else
+ fotg210_dbg(fotg210, "reused qh %p schedule\n", qh);
+
+ /* stuff into the periodic schedule */
+ qh_link_periodic(fotg210, qh);
+done:
+ return status;
+}
+
+static int intr_submit(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ unsigned epnum;
+ unsigned long flags;
+ struct fotg210_qh *qh;
+ int status;
+ struct list_head empty;
+
+ /* get endpoint and transfer/schedule data */
+ epnum = urb->ep->desc.bEndpointAddress;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+
+ /* get qh and force any scheduling errors */
+ INIT_LIST_HEAD(&empty);
+ qh = qh_append_tds(fotg210, urb, &empty, epnum, &urb->ep->hcpriv);
+ if (qh == NULL) {
+ status = -ENOMEM;
+ goto done;
+ }
+ if (qh->qh_state == QH_STATE_IDLE) {
+ status = qh_schedule(fotg210, qh);
+ if (status)
+ goto done;
+ }
+
+ /* then queue the urb's tds to the qh */
+ qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ BUG_ON(qh == NULL);
+
+ /* ... update usbfs periodic stats */
+ fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs++;
+
+done:
+ if (unlikely(status))
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+done_not_linked:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ if (status)
+ qtd_list_free(fotg210, urb, qtd_list);
+
+ return status;
+}
+
+static void scan_intr(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh;
+
+ list_for_each_entry_safe(qh, fotg210->qh_scan_next,
+ &fotg210->intr_qh_list, intr_node) {
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fotg210->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fotg210->qh_scan_next is adjusted
+ * in qh_unlink_periodic().
+ */
+ temp = qh_completions(fotg210, qh);
+ if (unlikely(qh->needs_rescan ||
+ (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED)))
+ start_unlink_intr(fotg210, qh);
+ else if (temp != 0)
+ goto rescan;
+ }
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fotg210_iso_stream ops work with both ITD and SITD */
+
+static struct fotg210_iso_stream *
+iso_stream_alloc(gfp_t mem_flags)
+{
+ struct fotg210_iso_stream *stream;
+
+ stream = kzalloc(sizeof(*stream), mem_flags);
+ if (likely(stream != NULL)) {
+ INIT_LIST_HEAD(&stream->td_list);
+ INIT_LIST_HEAD(&stream->free_list);
+ stream->next_uframe = -1;
+ }
+ return stream;
+}
+
+static void
+iso_stream_init(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_iso_stream *stream,
+ struct usb_device *dev,
+ int pipe,
+ unsigned interval
+)
+{
+ u32 buf1;
+ unsigned epnum, maxp;
+ int is_input;
+ long bandwidth;
+ unsigned multi;
+
+ /*
+ * this might be a "high bandwidth" highspeed endpoint,
+ * as encoded in the ep descriptor's wMaxPacket field
+ */
+ epnum = usb_pipeendpoint(pipe);
+ is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
+ maxp = usb_maxpacket(dev, pipe, !is_input);
+ if (is_input)
+ buf1 = (1 << 11);
+ else
+ buf1 = 0;
+
+ maxp = max_packet(maxp);
+ multi = hb_mult(maxp);
+ buf1 |= maxp;
+ maxp *= multi;
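+	/*
+	 * wMaxPacketSize bits 12:11 encode the "high bandwidth" transaction
+	 * count (up to 2 extra transactions per microframe), so the
+	 * per-microframe payload is the base packet size times that
+	 * multiplier.
+	 */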
+
+ stream->buf0 = cpu_to_hc32(fotg210, (epnum << 8) | dev->devnum);
+ stream->buf1 = cpu_to_hc32(fotg210, buf1);
+ stream->buf2 = cpu_to_hc32(fotg210, multi);
+
+ /* usbfs wants to report the average usecs per frame tied up
+ * when transfers on this endpoint are scheduled ...
+ */
+ if (dev->speed == USB_SPEED_FULL) {
+ interval <<= 3;
+ stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
+ is_input, 1, maxp));
+ stream->usecs /= 8;
+ } else {
+ stream->highspeed = 1;
+ stream->usecs = HS_USECS_ISO(maxp);
+ }
+ bandwidth = stream->usecs * 8;
+ bandwidth /= interval;
+
+ stream->bandwidth = bandwidth;
+ stream->udev = dev;
+ stream->bEndpointAddress = is_input | epnum;
+ stream->interval = interval;
+ stream->maxp = maxp;
+}
+
+static struct fotg210_iso_stream *
+iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb)
+{
+ unsigned epnum;
+ struct fotg210_iso_stream *stream;
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+
+ epnum = usb_pipeendpoint(urb->pipe);
+ if (usb_pipein(urb->pipe))
+ ep = urb->dev->ep_in[epnum];
+ else
+ ep = urb->dev->ep_out[epnum];
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ stream = ep->hcpriv;
+
+ if (unlikely(stream == NULL)) {
+ stream = iso_stream_alloc(GFP_ATOMIC);
+ if (likely(stream != NULL)) {
+ ep->hcpriv = stream;
+ stream->ep = ep;
+ iso_stream_init(fotg210, stream, urb->dev, urb->pipe,
+ urb->interval);
+ }
+
+ /* if dev->ep[epnum] is a QH, hw is set */
+ } else if (unlikely(stream->hw != NULL)) {
+ fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n",
+ urb->dev->devpath, epnum,
+ usb_pipein(urb->pipe) ? "in" : "out");
+ stream = NULL;
+ }
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return stream;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fotg210_iso_sched ops can be ITD-only or SITD-only */
+
+static struct fotg210_iso_sched *
+iso_sched_alloc(unsigned packets, gfp_t mem_flags)
+{
+ struct fotg210_iso_sched *iso_sched;
+ int size = sizeof(*iso_sched);
+
+ size += packets * sizeof(struct fotg210_iso_packet);
+ iso_sched = kzalloc(size, mem_flags);
+ if (likely(iso_sched != NULL))
+ INIT_LIST_HEAD(&iso_sched->td_list);
+
+ return iso_sched;
+}
+
+static inline void
+itd_sched_init(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_iso_sched *iso_sched,
+ struct fotg210_iso_stream *stream,
+ struct urb *urb
+)
+{
+ unsigned i;
+ dma_addr_t dma = urb->transfer_dma;
+
+ /* how many uframes are needed for these transfers */
+ iso_sched->span = urb->number_of_packets * stream->interval;
+
+ /* figure out per-uframe itd fields that we'll need later
+ * when we fit new itds into the schedule.
+ */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ struct fotg210_iso_packet *uframe = &iso_sched->packet[i];
+ unsigned length;
+ dma_addr_t buf;
+ u32 trans;
+
+ length = urb->iso_frame_desc[i].length;
+ buf = dma + urb->iso_frame_desc[i].offset;
+
+ trans = FOTG210_ISOC_ACTIVE;
+ trans |= buf & 0x0fff;
+ if (unlikely(((i + 1) == urb->number_of_packets))
+ && !(urb->transfer_flags & URB_NO_INTERRUPT))
+ trans |= FOTG210_ITD_IOC;
+ trans |= length << 16;
+ uframe->transaction = cpu_to_hc32(fotg210, trans);
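+		/*
+		 * The transaction word built above carries the ACTIVE status
+		 * bit, the 12-bit offset into the buffer page (bits 11:0),
+		 * the transfer length (bits 27:16), and IOC on the last
+		 * packet of the URB (unless URB_NO_INTERRUPT suppresses it).
+		 */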
+
+ /* might need to cross a buffer page within a uframe */
+ uframe->bufp = (buf & ~(u64)0x0fff);
+ buf += length;
+ if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
+ uframe->cross = 1;
+ }
+}
+
+static void
+iso_sched_free(
+ struct fotg210_iso_stream *stream,
+ struct fotg210_iso_sched *iso_sched
+)
+{
+ if (!iso_sched)
+ return;
+	/* caller must hold fotg210->lock! */
+ list_splice(&iso_sched->td_list, &stream->free_list);
+ kfree(iso_sched);
+}
+
+static int
+itd_urb_transaction(
+ struct fotg210_iso_stream *stream,
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ gfp_t mem_flags
+)
+{
+ struct fotg210_itd *itd;
+ dma_addr_t itd_dma;
+ int i;
+ unsigned num_itds;
+ struct fotg210_iso_sched *sched;
+ unsigned long flags;
+
+ sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
+ if (unlikely(sched == NULL))
+ return -ENOMEM;
+
+ itd_sched_init(fotg210, sched, stream, urb);
+
+ if (urb->interval < 8)
+ num_itds = 1 + (sched->span + 7) / 8;
+ else
+ num_itds = urb->number_of_packets;
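+	/*
+	 * Each iTD describes one frame (8 uframes): a short-interval URB
+	 * spanning N uframes needs at most N/8 + 1 iTDs (the extra one in
+	 * case the transfer doesn't start on a frame boundary), while with
+	 * interval >= 8 every packet lands in its own frame.
+	 */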
+
+ /* allocate/init ITDs */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ for (i = 0; i < num_itds; i++) {
+
+ /*
+ * Use iTDs from the free list, but not iTDs that may
+ * still be in use by the hardware.
+ */
+ if (likely(!list_empty(&stream->free_list))) {
+ itd = list_first_entry(&stream->free_list,
+ struct fotg210_itd, itd_list);
+ if (itd->frame == fotg210->now_frame)
+ goto alloc_itd;
+ list_del(&itd->itd_list);
+ itd_dma = itd->itd_dma;
+ } else {
+ alloc_itd:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ itd = dma_pool_alloc(fotg210->itd_pool, mem_flags,
+ &itd_dma);
+ spin_lock_irqsave(&fotg210->lock, flags);
+ if (!itd) {
+ iso_sched_free(stream, sched);
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ memset(itd, 0, sizeof(*itd));
+ itd->itd_dma = itd_dma;
+ list_add(&itd->itd_list, &sched->td_list);
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ /* temporarily store schedule info in hcpriv */
+ urb->hcpriv = sched;
+ urb->error_count = 0;
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline int
+itd_slot_ok(
+ struct fotg210_hcd *fotg210,
+ u32 mod,
+ u32 uframe,
+ u8 usecs,
+ u32 period
+)
+{
+ uframe %= period;
+ do {
+ /* can't commit more than uframe_periodic_max usec */
+ if (periodic_usecs(fotg210, uframe >> 3, uframe & 0x7)
+ > (fotg210->uframe_periodic_max - usecs))
+ return 0;
+
+ /* we know urb->interval is 2^N uframes */
+ uframe += period;
+ } while (uframe < mod);
+ return 1;
+}
+
+/*
+ * This scheduler plans almost as far into the future as it has actual
+ * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
+ * "as small as possible" to be cache-friendlier.)  That limits the size of
+ * transfers you can stream reliably; avoid more than 64 msec per urb.
+ * Also avoid queue depths of less than fotg210's worst irq latency (affected
+ * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
+ * and other factors); or more than about 230 msec total (for portability,
+ * given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler!
+ */
+
+#define SCHEDULE_SLOP 80 /* microframes */
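+/*
+ * Illustrative numbers, assuming the default 1024-entry periodic list:
+ * the schedule spans 1024 frames == 8192 uframes (about one second), and
+ * SCHEDULE_SLOP (80 uframes) corresponds to 10 msec of slack.
+ */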
+
+static int
+iso_stream_schedule(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct fotg210_iso_stream *stream
+)
+{
+ u32 now, next, start, period, span;
+ int status;
+ unsigned mod = fotg210->periodic_size << 3;
+ struct fotg210_iso_sched *sched = urb->hcpriv;
+
+ period = urb->interval;
+ span = sched->span;
+
+ if (span > mod - SCHEDULE_SLOP) {
+ fotg210_dbg(fotg210, "iso request %p too long\n", urb);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ now = fotg210_read_frame_index(fotg210) & (mod - 1);
+
+ /* Typical case: reuse current schedule, stream is still active.
+ * Hopefully there are no gaps from the host falling behind
+ * (irq delays etc), but if there are we'll take the next
+ * slot in the schedule, implicitly assuming URB_ISO_ASAP.
+ */
+ if (likely(!list_empty(&stream->td_list))) {
+ u32 excess;
+
+ /* For high speed devices, allow scheduling within the
+ * isochronous scheduling threshold. For full speed devices
+ * and Intel PCI-based controllers, don't (work around for
+ * Intel ICH9 bug).
+ */
+ if (!stream->highspeed && fotg210->fs_i_thresh)
+ next = now + fotg210->i_thresh;
+ else
+ next = now;
+
+ /* Fell behind (by up to twice the slop amount)?
+ * We decide based on the time of the last currently-scheduled
+ * slot, not the time of the next available slot.
+ */
+ excess = (stream->next_uframe - period - next) & (mod - 1);
+ if (excess >= mod - 2 * SCHEDULE_SLOP)
+ start = next + excess - mod + period *
+ DIV_ROUND_UP(mod - excess, period);
+ else
+ start = next + excess + period;
+ if (start - now >= mod) {
+ fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now - period, period,
+ mod);
+ status = -EFBIG;
+ goto fail;
+ }
+ }
+
+ /* need to schedule; when's the next (u)frame we could start?
+ * this is bigger than fotg210->i_thresh allows; scheduling itself
+ * isn't free, the slop should handle reasonably slow cpus. it
+ * can also help high bandwidth if the dma and irq loads don't
+ * jump until after the queue is primed.
+ */
+ else {
+ int done = 0;
+ start = SCHEDULE_SLOP + (now & ~0x07);
+
+ /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
+
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
+ */
+ next = start;
+ start += period;
+ do {
+ start--;
+ /* check schedule: enough space? */
+ if (itd_slot_ok(fotg210, mod, start,
+ stream->usecs, period))
+ done = 1;
+ } while (start > next && !done);
+
+ /* no room in the schedule */
+ if (!done) {
+ fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n",
+ urb, now, now + mod);
+ status = -ENOSPC;
+ goto fail;
+ }
+ }
+
+ /* Tried to schedule too far into the future? */
+ if (unlikely(start - now + span - period
+ >= mod - 2 * SCHEDULE_SLOP)) {
+ fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now, span - period,
+ mod - 2 * SCHEDULE_SLOP);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ stream->next_uframe = start & (mod - 1);
+
+ /* report high speed start in uframes; full speed, in frames */
+ urb->start_frame = stream->next_uframe;
+ if (!stream->highspeed)
+ urb->start_frame >>= 3;
+
+ /* Make sure scan_isoc() sees these */
+ if (fotg210->isoc_count == 0)
+ fotg210->next_frame = now >> 3;
+ return 0;
+
+ fail:
+ iso_sched_free(stream, sched);
+ urb->hcpriv = NULL;
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+itd_init(struct fotg210_hcd *fotg210, struct fotg210_iso_stream *stream,
+ struct fotg210_itd *itd)
+{
+ int i;
+
+ /* it's been recently zeroed */
+ itd->hw_next = FOTG210_LIST_END(fotg210);
+ itd->hw_bufp[0] = stream->buf0;
+ itd->hw_bufp[1] = stream->buf1;
+ itd->hw_bufp[2] = stream->buf2;
+
+ for (i = 0; i < 8; i++)
+ itd->index[i] = -1;
+
+ /* All other fields are filled when scheduling */
+}
+
+static inline void
+itd_patch(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_itd *itd,
+ struct fotg210_iso_sched *iso_sched,
+ unsigned index,
+ u16 uframe
+)
+{
+ struct fotg210_iso_packet *uf = &iso_sched->packet[index];
+ unsigned pg = itd->pg;
+
+ uframe &= 0x07;
+ itd->index[uframe] = index;
+
+ itd->hw_transaction[uframe] = uf->transaction;
+ itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12);
+ itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32));
+
+ /* iso_frame_desc[].offset must be strictly increasing */
+ if (unlikely(uf->cross)) {
+ u64 bufp = uf->bufp + 4096;
+
+ itd->pg = ++pg;
+ itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32));
+ }
+}
+
+static inline void
+itd_link(struct fotg210_hcd *fotg210, unsigned frame, struct fotg210_itd *itd)
+{
+ union fotg210_shadow *prev = &fotg210->pshadow[frame];
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip any iso nodes which might belong to previous microframes */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fotg210, prev, type);
+ hw_p = shadow_next_periodic(fotg210, &here, type);
+ here = *prev;
+ }
+
+ itd->itd_next = here;
+ itd->hw_next = *hw_p;
+ prev->itd = itd;
+ itd->frame = frame;
+ wmb();
+ *hw_p = cpu_to_hc32(fotg210, itd->itd_dma | Q_TYPE_ITD);
+}
+
+/* fit urb's itds into the selected schedule slot; activate as needed */
+static void itd_link_urb(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ unsigned mod,
+ struct fotg210_iso_stream *stream
+)
+{
+ int packet;
+ unsigned next_uframe, uframe, frame;
+ struct fotg210_iso_sched *iso_sched = urb->hcpriv;
+ struct fotg210_itd *itd;
+
+ next_uframe = stream->next_uframe & (mod - 1);
+
+ if (unlikely(list_empty(&stream->td_list))) {
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated
+ += stream->bandwidth;
+ fotg210_dbg(fotg210,
+ "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
+ urb->dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+ urb->interval,
+ next_uframe >> 3, next_uframe & 0x7);
+ }
+
+ /* fill iTDs uframe by uframe */
+ for (packet = 0, itd = NULL; packet < urb->number_of_packets;) {
+ if (itd == NULL) {
+ /* ASSERT: we have all necessary itds */
+
+ /* ASSERT: no itds for this endpoint in this uframe */
+
+ itd = list_entry(iso_sched->td_list.next,
+ struct fotg210_itd, itd_list);
+ list_move_tail(&itd->itd_list, &stream->td_list);
+ itd->stream = stream;
+ itd->urb = urb;
+ itd_init(fotg210, stream, itd);
+ }
+
+ uframe = next_uframe & 0x07;
+ frame = next_uframe >> 3;
+
+ itd_patch(fotg210, itd, iso_sched, packet, uframe);
+
+ next_uframe += stream->interval;
+ next_uframe &= mod - 1;
+ packet++;
+
+ /* link completed itds into the schedule */
+ if (((next_uframe >> 3) != frame)
+ || packet == urb->number_of_packets) {
+ itd_link(fotg210, frame & (fotg210->periodic_size - 1),
+ itd);
+ itd = NULL;
+ }
+ }
+ stream->next_uframe = next_uframe;
+
+ /* don't need that schedule data any more */
+ iso_sched_free(stream, iso_sched);
+ urb->hcpriv = NULL;
+
+ ++fotg210->isoc_count;
+ enable_periodic(fotg210);
+}
+
+#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\
+ FOTG210_ISOC_XACTERR)
+
+/* Process and recycle a completed ITD. Return true iff its urb completed,
+ * and hence its completion callback probably added things to the hardware
+ * schedule.
+ *
+ * Note that we carefully avoid recycling this descriptor until after any
+ * completion callback runs, so that it won't be reused quickly. That is,
+ * assuming (a) no more than two urbs per frame on this endpoint, and also
+ * (b) only this endpoint's completions submit URBs. It seems some silicon
+ * corrupts things if you reuse completed descriptors very quickly...
+ */
+static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
+{
+ struct urb *urb = itd->urb;
+ struct usb_iso_packet_descriptor *desc;
+ u32 t;
+ unsigned uframe;
+ int urb_index = -1;
+ struct fotg210_iso_stream *stream = itd->stream;
+ struct usb_device *dev;
+ bool retval = false;
+
+ /* for each uframe with a packet */
+ for (uframe = 0; uframe < 8; uframe++) {
+ if (likely(itd->index[uframe] == -1))
+ continue;
+ urb_index = itd->index[uframe];
+ desc = &urb->iso_frame_desc[urb_index];
+
+ t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]);
+ itd->hw_transaction[uframe] = 0;
+
+ /* report transfer status */
+ if (unlikely(t & ISO_ERRS)) {
+ urb->error_count++;
+ if (t & FOTG210_ISOC_BUF_ERR)
+ desc->status = usb_pipein(urb->pipe)
+ ? -ENOSR /* hc couldn't read */
+ : -ECOMM; /* hc couldn't write */
+ else if (t & FOTG210_ISOC_BABBLE)
+ desc->status = -EOVERFLOW;
+ else /* (t & FOTG210_ISOC_XACTERR) */
+ desc->status = -EPROTO;
+
+ /* HC need not update length with this error */
+ if (!(t & FOTG210_ISOC_BABBLE)) {
+ desc->actual_length =
+ fotg210_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ }
+ } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
+ desc->status = 0;
+ desc->actual_length = fotg210_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ } else {
+ /* URB was too late */
+ desc->status = -EXDEV;
+ }
+ }
+
+ /* handle completion now? */
+ if (likely((urb_index + 1) != urb->number_of_packets))
+ goto done;
+
+ /* ASSERT: it's really the last itd for this urb
+ list_for_each_entry (itd, &stream->td_list, itd_list)
+ BUG_ON (itd->urb == urb);
+ */
+
+ /* give urb back to the driver; completion often (re)submits */
+ dev = urb->dev;
+ fotg210_urb_done(fotg210, urb, 0);
+ retval = true;
+ urb = NULL;
+
+ --fotg210->isoc_count;
+ disable_periodic(fotg210);
+
+ if (unlikely(list_is_singular(&stream->td_list))) {
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated
+ -= stream->bandwidth;
+ fotg210_dbg(fotg210,
+ "deschedule devp %s ep%d%s-iso\n",
+ dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
+ }
+
+done:
+ itd->urb = NULL;
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&itd->itd_list, &stream->free_list);
+
+ /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &fotg210->cached_itd_list);
+ start_free_itds(fotg210);
+ }
+
+ return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int status = -EINVAL;
+ unsigned long flags;
+ struct fotg210_iso_stream *stream;
+
+ /* Get iso_stream head */
+ stream = iso_stream_find(fotg210, urb);
+ if (unlikely(stream == NULL)) {
+ fotg210_dbg(fotg210, "can't get iso stream\n");
+ return -ENOMEM;
+ }
+ if (unlikely(urb->interval != stream->interval &&
+ fotg210_port_speed(fotg210, 0) ==
+ USB_PORT_STAT_HIGH_SPEED)) {
+ fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n",
+ stream->interval, urb->interval);
+ goto done;
+ }
+
+#ifdef FOTG210_URB_TRACE
+ fotg210_dbg(fotg210,
+ "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->transfer_buffer_length,
+ urb->number_of_packets, urb->interval,
+ stream);
+#endif
+
+ /* allocate ITDs w/o locking anything */
+ status = itd_urb_transaction(stream, fotg210, urb, mem_flags);
+ if (unlikely(status < 0)) {
+ fotg210_dbg(fotg210, "can't init itds\n");
+ goto done;
+ }
+
+ /* schedule ... need to lock */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+ status = iso_stream_schedule(fotg210, urb, stream);
+ if (likely(status == 0))
+ itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream);
+ else
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+ done_not_linked:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ done:
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_isoc(struct fotg210_hcd *fotg210)
+{
+ unsigned uf, now_frame, frame;
+ unsigned fmask = fotg210->periodic_size - 1;
+ bool modified, live;
+
+ /*
+ * When running, scan from last scan point up to "now"
+ * else clean up by scanning everything that's left.
+ * Touches as few pages as possible: cache-friendly.
+ */
+ if (fotg210->rh_state >= FOTG210_RH_RUNNING) {
+ uf = fotg210_read_frame_index(fotg210);
+ now_frame = (uf >> 3) & fmask;
+ live = true;
+ } else {
+ now_frame = (fotg210->next_frame - 1) & fmask;
+ live = false;
+ }
+ fotg210->now_frame = now_frame;
+
+ frame = fotg210->next_frame;
+ for (;;) {
+ union fotg210_shadow q, *q_p;
+ __hc32 type, *hw_p;
+
+restart:
+ /* scan each element in frame's queue for completions */
+ q_p = &fotg210->pshadow[frame];
+ hw_p = &fotg210->periodic[frame];
+ q.ptr = q_p->ptr;
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ modified = false;
+
+ while (q.ptr != NULL) {
+ switch (hc32_to_cpu(fotg210, type)) {
+ case Q_TYPE_ITD:
+ /* If this ITD is still active, leave it for
+ * later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
+ */
+ if (frame == now_frame && live) {
+ rmb();
+ for (uf = 0; uf < 8; uf++) {
+ if (q.itd->hw_transaction[uf] &
+ ITD_ACTIVE(fotg210))
+ break;
+ }
+ if (uf < 8) {
+ q_p = &q.itd->itd_next;
+ hw_p = &q.itd->hw_next;
+ type = Q_NEXT_TYPE(fotg210,
+ q.itd->hw_next);
+ q = *q_p;
+ break;
+ }
+ }
+
+ /* Take finished ITDs out of the schedule
+ * and process them: recycle, maybe report
+ * URB completion. HC won't cache the
+ * pointer for much longer, if at all.
+ */
+ *q_p = q.itd->itd_next;
+ *hw_p = q.itd->hw_next;
+ type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
+ wmb();
+ modified = itd_complete(fotg210, q.itd);
+ q = *q_p;
+ break;
+ default:
+ fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n",
+ type, frame, q.ptr);
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
+ q.ptr = NULL;
+ break;
+ }
+
+ /* assume completion callbacks modify the queue */
+ if (unlikely(modified && fotg210->isoc_count > 0))
+ goto restart;
+ }
+
+ /* Stop when we have reached the current frame */
+ if (frame == now_frame)
+ break;
+ frame = (frame + 1) & fmask;
+ }
+ fotg210->next_frame = now_frame;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * Display / Set uframe_periodic_max
+ */
+static ssize_t show_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fotg210_hcd *fotg210;
+ int n;
+
+ fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
+ n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max);
+ return n;
+}
+
+
+static ssize_t store_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fotg210_hcd *fotg210;
+ unsigned uframe_periodic_max;
+ unsigned frame, uframe;
+ unsigned short allocated_max;
+ unsigned long flags;
+ ssize_t ret;
+
+ fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
+ if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ return -EINVAL;
+
+ if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+ fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
+ uframe_periodic_max);
+ return -EINVAL;
+ }
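+	/*
+	 * A microframe is 125 usec long, so the accepted range 100..124
+	 * corresponds to reserving 80%..99% of each microframe for
+	 * periodic transfers.
+	 */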
+
+ ret = -EINVAL;
+
+ /*
+ * lock, so that our checking does not race with possible periodic
+ * bandwidth allocation through submitting new urbs.
+ */
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ /*
+	 * For a request to decrease the max periodic bandwidth, we have to check
+ * every microframe in the schedule to see whether the decrease is
+ * possible.
+ */
+ if (uframe_periodic_max < fotg210->uframe_periodic_max) {
+ allocated_max = 0;
+
+ for (frame = 0; frame < fotg210->periodic_size; ++frame)
+ for (uframe = 0; uframe < 7; ++uframe)
+ allocated_max = max(allocated_max,
+ periodic_usecs(fotg210, frame, uframe));
+
+ if (allocated_max > uframe_periodic_max) {
+ fotg210_info(fotg210,
+				"cannot decrease uframe_periodic_max because "
+ "periodic bandwidth is already allocated "
+ "(%u > %u)\n",
+ allocated_max, uframe_periodic_max);
+ goto out_unlock;
+ }
+ }
+
+ /* increasing is always ok */
+
+ fotg210_info(fotg210, "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n",
+ 100 * uframe_periodic_max/125, uframe_periodic_max);
+
+ if (uframe_periodic_max != 100)
+ fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n");
+
+ fotg210->uframe_periodic_max = uframe_periodic_max;
+ ret = count;
+
+out_unlock:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return ret;
+}
+
+static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max,
+ store_uframe_periodic_max);
+
+static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
+{
+ struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+ int i = 0;
+
+ if (i)
+ goto out;
+
+ i = device_create_file(controller, &dev_attr_uframe_periodic_max);
+out:
+ return i;
+}
+
+static inline void remove_sysfs_files(struct fotg210_hcd *fotg210)
+{
+ struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+
+ device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
+/*-------------------------------------------------------------------------*/
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210)
+{
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+
+ fotg210_writel(fotg210, PORT_RWC_BITS, status_reg);
+}
+
+/*
+ * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fotg210_silence_controller(struct fotg210_hcd *fotg210)
+{
+ fotg210_halt(fotg210);
+
+ spin_lock_irq(&fotg210->lock);
+ fotg210->rh_state = FOTG210_RH_HALTED;
+ fotg210_turn_off_all_ports(fotg210);
+ spin_unlock_irq(&fotg210->lock);
+}
+
+/* fotg210_shutdown kicks in for silicon on any bus (not just pci, etc).
+ * This forcibly disables dma and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void fotg210_shutdown(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
+ spin_lock_irq(&fotg210->lock);
+ fotg210->shutdown = true;
+ fotg210->rh_state = FOTG210_RH_STOPPING;
+ fotg210->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fotg210->lock);
+
+ fotg210_silence_controller(fotg210);
+
+ hrtimer_cancel(&fotg210->hrtimer);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * fotg210_work is called from some interrupts, timers, and so on.
+ * it calls driver completion functions, after dropping fotg210->lock.
+ */
+static void fotg210_work(struct fotg210_hcd *fotg210)
+{
+ /* another CPU may drop fotg210->lock during a schedule scan while
+ * it reports urb completions. this flag guards against bogus
+ * attempts at re-entrant schedule scanning.
+ */
+ if (fotg210->scanning) {
+ fotg210->need_rescan = true;
+ return;
+ }
+ fotg210->scanning = true;
+
+ rescan:
+ fotg210->need_rescan = false;
+ if (fotg210->async_count)
+ scan_async(fotg210);
+ if (fotg210->intr_count > 0)
+ scan_intr(fotg210);
+ if (fotg210->isoc_count > 0)
+ scan_isoc(fotg210);
+ if (fotg210->need_rescan)
+ goto rescan;
+ fotg210->scanning = false;
+
+ /* the IO watchdog guards against hardware or driver bugs that
+ * misplace IRQs, and should let us run completely without IRQs.
+ * such lossage has been observed on both VT6202 and VT8235.
+ */
+ turn_on_io_watchdog(fotg210);
+}
+
+/*
+ * Called when the fotg210_hcd module is removed.
+ */
+static void fotg210_stop(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
+ fotg210_dbg(fotg210, "stop\n");
+
+ /* no more interrupts ... */
+
+ spin_lock_irq(&fotg210->lock);
+ fotg210->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fotg210->lock);
+
+ fotg210_quiesce(fotg210);
+ fotg210_silence_controller(fotg210);
+ fotg210_reset(fotg210);
+
+ hrtimer_cancel(&fotg210->hrtimer);
+ remove_sysfs_files(fotg210);
+ remove_debug_files(fotg210);
+
+ /* root hub is shut down separately (first, when possible) */
+ spin_lock_irq(&fotg210->lock);
+ end_free_itds(fotg210);
+ spin_unlock_irq(&fotg210->lock);
+ fotg210_mem_cleanup(fotg210);
+
+#ifdef FOTG210_STATS
+ fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa,
+ fotg210->stats.lost_iaa);
+ fotg210_dbg(fotg210, "complete %ld unlink %ld\n",
+ fotg210->stats.complete, fotg210->stats.unlink);
+#endif
+
+ dbg_status(fotg210, "fotg210_stop completed",
+ fotg210_readl(fotg210, &fotg210->regs->status));
+}
+
+/* one-time init, only for memory state */
+static int hcd_fotg210_init(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp;
+ int retval;
+ u32 hcc_params;
+ struct fotg210_qh_hw *hw;
+
+ spin_lock_init(&fotg210->lock);
+
+ /*
+	 * keep the IO watchdog enabled by default; well-behaved HCDs can
+	 * turn it off later
+ */
+ fotg210->need_io_watchdog = 1;
+
+ hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ fotg210->hrtimer.function = fotg210_hrtimer_func;
+ fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
+
+ hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+ /*
+ * by default set standard 80% (== 100 usec/uframe) max periodic
+ * bandwidth as required by USB 2.0
+ */
+ fotg210->uframe_periodic_max = 100;
+
+ /*
+ * hw default: 1K periodic list heads, one per frame.
+ * periodic_size can shrink by USBCMD update if hcc_params allows.
+ */
+ fotg210->periodic_size = DEFAULT_I_TDPS;
+ INIT_LIST_HEAD(&fotg210->intr_qh_list);
+ INIT_LIST_HEAD(&fotg210->cached_itd_list);
+
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ switch (FOTG210_TUNE_FLS) {
+ case 0:
+ fotg210->periodic_size = 1024;
+ break;
+ case 1:
+ fotg210->periodic_size = 512;
+ break;
+ case 2:
+ fotg210->periodic_size = 256;
+ break;
+ default:
+ BUG();
+ }
+ }
+ retval = fotg210_mem_init(fotg210, GFP_KERNEL);
+ if (retval < 0)
+ return retval;
+
+ /* controllers may cache some of the periodic schedule ... */
+ fotg210->i_thresh = 2;
+
+ /*
+ * dedicate a qh for the async ring head, since we couldn't unlink
+ * a 'real' qh without stopping the async schedule [4.8]. use it
+ * as the 'reclamation list head' too.
+ * its dummy is used in hw_alt_next of many tds, to prevent the qh
+ * from automatically advancing to the next td after short reads.
+ */
+ fotg210->async->qh_next.qh = NULL;
+ hw = fotg210->async->hw;
+ hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma);
+ hw->hw_info1 = cpu_to_hc32(fotg210, QH_HEAD);
+ hw->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
+ hw->hw_qtd_next = FOTG210_LIST_END(fotg210);
+ fotg210->async->qh_state = QH_STATE_LINKED;
+ hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma);
+
+ /* clear interrupt enables, set irq latency */
+ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+ log2_irq_thresh = 0;
+ temp = 1 << (16 + log2_irq_thresh);
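+	/*
+	 * USBCMD bits 23:16 form the Interrupt Threshold Control field,
+	 * counted in microframes; 1 << log2_irq_thresh therefore selects
+	 * 1, 2, 4, ... or 64 uframes between interrupts.
+	 */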
+ if (HCC_CANPARK(hcc_params)) {
+ /* HW default park == 3, on hardware that supports it (like
+ * NVidia and ALI silicon), maximizes throughput on the async
+ * schedule by avoiding QH fetches between transfers.
+ *
+ * With fast usb storage devices and NForce2, "park" seems to
+		 * cause problems: throughput reduction (!), data errors...
+ */
+ if (park) {
+ park = min_t(unsigned, park, 3);
+ temp |= CMD_PARK;
+ temp |= park << 8;
+ }
+ fotg210_dbg(fotg210, "park %d\n", park);
+ }
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ temp &= ~(3 << 2);
+ temp |= (FOTG210_TUNE_FLS << 2);
+ }
+ fotg210->command = temp;
+
+ /* Accept arbitrarily long scatter-gather lists */
+ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+ hcd->self.sg_tablesize = ~0;
+ return 0;
+}
+
+/* start HC running; it's halted, hcd_fotg210_init() has been run (once) */
+static int fotg210_run(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp;
+ u32 hcc_params;
+
+ hcd->uses_new_polling = 1;
+
+ /* EHCI spec section 4.1 */
+
+ fotg210_writel(fotg210, fotg210->periodic_dma,
+ &fotg210->regs->frame_list);
+ fotg210_writel(fotg210, (u32)fotg210->async->qh_dma,
+ &fotg210->regs->async_next);
+
+ /*
+ * hcc_params controls whether fotg210->regs->segment must (!!!)
+ * be used; it constrains QH/ITD/SITD and QTD locations.
+ * pci_pool consistent memory always uses segment zero.
+ * streaming mappings for I/O buffers, like pci_map_single(),
+ * can return segments above 4GB, if the device allows.
+ *
+ * NOTE: the dma mask is visible through dma_supported(), so
+ * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+ * Scsi_Host.highmem_io, and so forth. It's readonly to all
+ * host side drivers though.
+ */
+ hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+ /*
+ * Philips, Intel, and maybe others need CMD_RUN before the
+ * root hub will detect new devices (why?); NEC doesn't
+ */
+ fotg210->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+ fotg210->command |= CMD_RUN;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+ dbg_cmd(fotg210, "init", fotg210->command);
+
+ /*
+ * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+ * are explicitly handed to companion controller(s), so no TT is
+ * involved with the root hub. (Except where one is integrated,
+ * and there's no companion controller unless maybe for USB OTG.)
+ *
+ * Turning on the CF flag will transfer ownership of all ports
+ * from the companions to the EHCI controller. If any of the
+ * companions are in the middle of a port reset at the time, it
+ * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
+ * guarantees that no resets are in progress. After we set CF,
+ * a short delay lets the hardware catch up; new resets shouldn't
+ * be started before the port switching actions could complete.
+ */
+ down_write(&ehci_cf_port_reset_rwsem);
+ fotg210->rh_state = FOTG210_RH_RUNNING;
+ /* unblock posted writes */
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ msleep(5);
+ up_write(&ehci_cf_port_reset_rwsem);
+ fotg210->last_periodic_enable = ktime_get_real();
+
+ temp = HC_VERSION(fotg210,
+ fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+ fotg210_info(fotg210,
+ "USB %x.%x started, EHCI %x.%02x\n",
+ ((fotg210->sbrn & 0xf0)>>4), (fotg210->sbrn & 0x0f),
+ temp >> 8, temp & 0xff);
+
+ fotg210_writel(fotg210, INTR_MASK,
+ &fotg210->regs->intr_enable); /* Turn On Interrupts */
+
+ /* GRR this is run-once init(), being done every time the HC starts.
+	 * So long as they're part of class devices, we can't do it in init()
+ * since the class device isn't created that early.
+ */
+ create_debug_files(fotg210);
+ create_sysfs_files(fotg210);
+
+ return 0;
+}
+
+static int fotg210_setup(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ int retval;
+
+ fotg210->regs = (void __iomem *)fotg210->caps +
+ HC_LENGTH(fotg210,
+ fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+ dbg_hcs_params(fotg210, "reset");
+ dbg_hcc_params(fotg210, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ fotg210->hcs_params = fotg210_readl(fotg210,
+ &fotg210->caps->hcs_params);
+
+ fotg210->sbrn = HCD_USB2;
+
+ /* data structure init */
+ retval = hcd_fotg210_init(hcd);
+ if (retval)
+ return retval;
+
+ retval = fotg210_halt(fotg210);
+ if (retval)
+ return retval;
+
+ fotg210_reset(fotg210);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 status, masked_status, pcd_status = 0, cmd;
+ int bh;
+
+ spin_lock(&fotg210->lock);
+
+ status = fotg210_readl(fotg210, &fotg210->regs->status);
+
+ /* e.g. cardbus physical eject */
+ if (status == ~(u32) 0) {
+ fotg210_dbg(fotg210, "device removed\n");
+ goto dead;
+ }
+
+ /*
+ * We don't use STS_FLR, but some controllers don't like it to
+ * remain on, so mask it out along with the other status bits.
+ */
+ masked_status = status & (INTR_MASK | STS_FLR);
+
+ /* Shared IRQ? */
+ if (!masked_status ||
+ unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) {
+ spin_unlock(&fotg210->lock);
+ return IRQ_NONE;
+ }
+
+ /* clear (just) interrupts */
+ fotg210_writel(fotg210, masked_status, &fotg210->regs->status);
+ cmd = fotg210_readl(fotg210, &fotg210->regs->command);
+ bh = 0;
+
+ /* unrequested/ignored: Frame List Rollover */
+ dbg_status(fotg210, "irq", status);
+
+ /* INT, ERR, and IAA interrupt rates can be throttled */
+
+ /* normal [4.15.1.2] or error [4.15.1.1] completion */
+ if (likely((status & (STS_INT|STS_ERR)) != 0)) {
+ if (likely((status & STS_ERR) == 0))
+ COUNT(fotg210->stats.normal);
+ else
+ COUNT(fotg210->stats.error);
+ bh = 1;
+ }
+
+ /* complete the unlinking of some qh [4.15.2.3] */
+ if (status & STS_IAA) {
+
+ /* Turn off the IAA watchdog */
+ fotg210->enabled_hrtimer_events &=
+ ~BIT(FOTG210_HRTIMER_IAA_WATCHDOG);
+
+ /*
+ * Mild optimization: Allow another IAAD to reset the
+ * hrtimer, if one occurs before the next expiration.
+ * In theory we could always cancel the hrtimer, but
+ * tests show that about half the time it will be reset
+ * for some other event anyway.
+ */
+ if (fotg210->next_hrtimer_event == FOTG210_HRTIMER_IAA_WATCHDOG)
+ ++fotg210->next_hrtimer_event;
+
+ /* guard against (alleged) silicon errata */
+ if (cmd & CMD_IAAD)
+ fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
+ if (fotg210->async_iaa) {
+ COUNT(fotg210->stats.iaa);
+ end_unlink_async(fotg210);
+ } else
+ fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
+ }
+
+ /* remote wakeup [4.3.1] */
+ if (status & STS_PCD) {
+ int pstatus;
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+
+ /* kick root hub later */
+ pcd_status = status;
+
+ /* resume root hub? */
+ if (fotg210->rh_state == FOTG210_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+
+ pstatus = fotg210_readl(fotg210, status_reg);
+
+ if (test_bit(0, &fotg210->suspended_ports) &&
+ ((pstatus & PORT_RESUME) ||
+ !(pstatus & PORT_SUSPEND)) &&
+ (pstatus & PORT_PE) &&
+ fotg210->reset_done[0] == 0) {
+
+ /* start 20 msec resume signaling from this port,
+ * and make khubd collect PORT_STAT_C_SUSPEND to
+ * stop that signaling. Use 5 ms extra for safety,
+ * like usb_port_resume() does.
+ */
+ fotg210->reset_done[0] = jiffies + msecs_to_jiffies(25);
+ set_bit(0, &fotg210->resuming_ports);
+ fotg210_dbg(fotg210, "port 1 remote wakeup\n");
+ mod_timer(&hcd->rh_timer, fotg210->reset_done[0]);
+ }
+ }
+
+ /* PCI errors [4.15.2.4] */
+ if (unlikely((status & STS_FATAL) != 0)) {
+ fotg210_err(fotg210, "fatal error\n");
+ dbg_cmd(fotg210, "fatal", cmd);
+ dbg_status(fotg210, "fatal", status);
+dead:
+ usb_hc_died(hcd);
+
+ /* Don't let the controller do anything more */
+ fotg210->shutdown = true;
+ fotg210->rh_state = FOTG210_RH_STOPPING;
+ fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+ fotg210_writel(fotg210, fotg210->command,
+ &fotg210->regs->command);
+ fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+ fotg210_handle_controller_death(fotg210);
+
+ /* Handle completions when the controller stops */
+ bh = 0;
+ }
+
+ if (bh)
+ fotg210_work(fotg210);
+ spin_unlock(&fotg210->lock);
+ if (pcd_status)
+ usb_hcd_poll_rh_status(hcd);
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE: control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int fotg210_urb_enqueue(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags
+) {
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct list_head qtd_list;
+
+ INIT_LIST_HEAD(&qtd_list);
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ /* qh_completions() code doesn't handle all the fault cases
+ * in multi-TD control transfers. Even 1KB is rare anyway.
+ */
+ if (urb->transfer_buffer_length > (16 * 1024))
+ return -EMSGSIZE;
+ /* FALLTHROUGH */
+ /* case PIPE_BULK: */
+ default:
+ if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return submit_async(fotg210, urb, &qtd_list, mem_flags);
+
+ case PIPE_INTERRUPT:
+ if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return intr_submit(fotg210, urb, &qtd_list, mem_flags);
+
+ case PIPE_ISOCHRONOUS:
+ return itd_submit(fotg210, urb, mem_flags);
+ }
+}
+
+/* remove from hardware lists
+ * completions normally happen asynchronously
+ */
+
+static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
+ switch (usb_pipetype(urb->pipe)) {
+ /* case PIPE_CONTROL: */
+ /* case PIPE_BULK:*/
+ default:
+ qh = (struct fotg210_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_async(fotg210, qh);
+ break;
+ case QH_STATE_UNLINK:
+ case QH_STATE_UNLINK_WAIT:
+ /* already started */
+ break;
+ case QH_STATE_IDLE:
+ /* QH might be waiting for a Clear-TT-Buffer */
+ qh_completions(fotg210, qh);
+ break;
+ }
+ break;
+
+ case PIPE_INTERRUPT:
+ qh = (struct fotg210_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_intr(fotg210, qh);
+ break;
+ case QH_STATE_IDLE:
+ qh_completions(fotg210, qh);
+ break;
+ default:
+ fotg210_dbg(fotg210, "bogus qh %p state %d\n",
+ qh, qh->qh_state);
+ goto done;
+ }
+ break;
+
+ case PIPE_ISOCHRONOUS:
+ /* itd... */
+
+ /* wait till next completion, do it then. */
+ /* completion irqs can wait up to 1024 msec, */
+ break;
+ }
+done:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* bulk qh holds the data toggle */
+
+static void
+fotg210_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ unsigned long flags;
+ struct fotg210_qh *qh, *tmp;
+
+ /* ASSERT: any requests/urbs are being unlinked */
+ /* ASSERT: nobody can be submitting urbs for this any more */
+
+rescan:
+ spin_lock_irqsave(&fotg210->lock, flags);
+ qh = ep->hcpriv;
+ if (!qh)
+ goto done;
+
+ /* endpoints can be iso streams. for now, we don't
+ * accelerate iso completions ... so spin a while.
+ */
+ if (qh->hw == NULL) {
+ struct fotg210_iso_stream *stream = ep->hcpriv;
+
+ if (!list_empty(&stream->td_list))
+ goto idle_timeout;
+
+ /* BUG_ON(!list_empty(&stream->free_list)); */
+ kfree(stream);
+ goto done;
+ }
+
+ if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ qh->qh_state = QH_STATE_IDLE;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ for (tmp = fotg210->async->qh_next.qh;
+ tmp && tmp != qh;
+ tmp = tmp->qh_next.qh)
+ continue;
+ /* periodic qh self-unlinks on empty, and a COMPLETING qh
+ * may already be unlinked.
+ */
+ if (tmp)
+ start_unlink_async(fotg210, qh);
+ /* FALL THROUGH */
+ case QH_STATE_UNLINK: /* wait for hw to finish? */
+ case QH_STATE_UNLINK_WAIT:
+idle_timeout:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ case QH_STATE_IDLE: /* fully unlinked */
+ if (qh->clearing_tt)
+ goto idle_timeout;
+ if (list_empty(&qh->qtd_list)) {
+ qh_destroy(fotg210, qh);
+ break;
+ }
+ /* else FALL THROUGH */
+ default:
+ /* caller was supposed to have unlinked any requests;
+ * that's not our job. just leak this memory.
+ */
+ fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n",
+ qh, ep->desc.bEndpointAddress, qh->qh_state,
+ list_empty(&qh->qtd_list) ? "" : "(has tds)");
+ break;
+ }
+ done:
+ ep->hcpriv = NULL;
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static void
+fotg210_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh;
+ int eptype = usb_endpoint_type(&ep->desc);
+ int epnum = usb_endpoint_num(&ep->desc);
+ int is_out = usb_endpoint_dir_out(&ep->desc);
+ unsigned long flags;
+
+ if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+ return;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ qh = ep->hcpriv;
+
+ /* For Bulk and Interrupt endpoints we maintain the toggle state
+ * in the hardware; the toggle bits in udev aren't used at all.
+ * When an endpoint is reset by usb_clear_halt() we must reset
+ * the toggle bit in the QH.
+ */
+ if (qh) {
+ usb_settoggle(qh->dev, epnum, is_out, 0);
+ if (!list_empty(&qh->qtd_list)) {
+ WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+ } else if (qh->qh_state == QH_STATE_LINKED ||
+ qh->qh_state == QH_STATE_COMPLETING) {
+
+ /* The toggle value in the QH can't be updated
+ * while the QH is active. Unlink it now;
+ * re-linking will call qh_refresh().
+ */
+ if (eptype == USB_ENDPOINT_XFER_BULK)
+ start_unlink_async(fotg210, qh);
+ else
+ start_unlink_intr(fotg210, qh);
+ }
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static int fotg210_get_frame(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ return (fotg210_read_frame_index(fotg210) >> 3) %
+ fotg210->periodic_size;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The EHCI in ChipIdea HDRC cannot be a separate module or device,
+ * because its registers (and irq) are shared between host/gadget/otg
+ * functions and in order to facilitate role switching we cannot
+ * give the fotg210 driver exclusive access to those.
+ */
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static const struct hc_driver fotg210_fotg210_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Faraday USB2.0 Host Controller",
+ .hcd_priv_size = sizeof(struct fotg210_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = fotg210_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = hcd_fotg210_init,
+ .start = fotg210_run,
+ .stop = fotg210_stop,
+ .shutdown = fotg210_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = fotg210_urb_enqueue,
+ .urb_dequeue = fotg210_urb_dequeue,
+ .endpoint_disable = fotg210_endpoint_disable,
+ .endpoint_reset = fotg210_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = fotg210_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = fotg210_hub_status_data,
+ .hub_control = fotg210_hub_control,
+ .bus_suspend = fotg210_bus_suspend,
+ .bus_resume = fotg210_bus_resume,
+
+ .relinquish_port = fotg210_relinquish_port,
+ .port_handed_over = fotg210_port_handed_over,
+
+ .clear_tt_buffer_complete = fotg210_clear_tt_buffer_complete,
+};
+
+static void fotg210_init(struct fotg210_hcd *fotg210)
+{
+ u32 value;
+
+ iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
+ &fotg210->regs->gmir);
+
+ value = ioread32(&fotg210->regs->otgcsr);
+ value &= ~OTGCSR_A_BUS_DROP;
+ value |= OTGCSR_A_BUS_REQ;
+ iowrite32(value, &fotg210->regs->otgcsr);
+}
+
+/**
+ * fotg210_hcd_probe - initialize faraday FOTG210 HCDs
+ * @pdev: platform device of the FOTG210 host controller
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int fotg210_hcd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq;
+ int retval = -ENODEV;
+ struct fotg210_hcd *fotg210;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pdev->dev.power.power_state = PMSG_ON;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(dev));
+ return -ENODEV;
+ }
+
+ irq = res->start;
+
+ hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "failed to create hcd with err %d\n", retval);
+ retval = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->has_tt = 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ fotg210_fotg210_hc_driver.description)) {
+ dev_dbg(dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto fail_request_resource;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+
+ hcd->regs = ioremap_nocache(res->start, resource_size(res));
+ if (hcd->regs == NULL) {
+ dev_dbg(dev, "error mapping memory\n");
+ retval = -EFAULT;
+ goto fail_ioremap;
+ }
+
+ fotg210 = hcd_to_fotg210(hcd);
+
+ fotg210->caps = hcd->regs;
+
+ retval = fotg210_setup(hcd);
+ if (retval)
+ goto fail_add_hcd;
+
+ fotg210_init(fotg210);
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(dev, "failed to add hcd with err %d\n", retval);
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ return retval;
+
+fail_add_hcd:
+ iounmap(hcd->regs);
+fail_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
+ return retval;
+}
+
+/**
+ * fotg210_hcd_remove - shutdown processing for FOTG210 HCDs
+ * @pdev: platform device of the USB Host Controller being removed
+ *
+ */
+static int fotg210_hcd_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ if (!hcd)
+ return 0;
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct platform_driver fotg210_hcd_driver = {
+ .driver = {
+ .name = "fotg210-hcd",
+ },
+ .probe = fotg210_hcd_probe,
+ .remove = fotg210_hcd_remove,
+};
+
+static int __init fotg210_hcd_init(void)
+{
+ int retval = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
+ test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
+ pr_warn("Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n");
+
+ pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n",
+ hcd_name,
+ sizeof(struct fotg210_qh), sizeof(struct fotg210_qtd),
+ sizeof(struct fotg210_itd));
+
+ fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
+ if (!fotg210_debug_root) {
+ retval = -ENOENT;
+ goto err_debug;
+ }
+
+ retval = platform_driver_register(&fotg210_hcd_driver);
+ if (retval < 0)
+ goto clean;
+ return retval;
+
+clean:
+ debugfs_remove(fotg210_debug_root);
+ fotg210_debug_root = NULL;
+err_debug:
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ return retval;
+}
+module_init(fotg210_hcd_init);
+
+static void __exit fotg210_hcd_cleanup(void)
+{
+ platform_driver_unregister(&fotg210_hcd_driver);
+ debugfs_remove(fotg210_debug_root);
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(fotg210_hcd_cleanup);
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
new file mode 100644
index 00000000000..ac6cd1bfd20
--- /dev/null
+++ b/drivers/usb/host/fotg210.h
@@ -0,0 +1,742 @@
+#ifndef __LINUX_FOTG210_H
+#define __LINUX_FOTG210_H
+
+/* definitions used for the EHCI driver */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given FOTG210_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#define __hc32 __le32
+#define __hc16 __le16
+
+/* statistics can be kept for tuning/monitoring */
+struct fotg210_stats {
+ /* irq usage */
+ unsigned long normal;
+ unsigned long error;
+ unsigned long iaa;
+ unsigned long lost_iaa;
+
+ /* termination of urbs from core */
+ unsigned long complete;
+ unsigned long unlink;
+};
+
+/* fotg210_hcd->lock guards shared data against other CPUs:
+ * fotg210_hcd: async, unlink, periodic (and shadow), ...
+ * usb_host_endpoint: hcpriv
+ * fotg210_qh: qh_next, qtd_list
+ * fotg210_qtd: qtd_list
+ *
+ * Also, hold this lock when talking to HC registers or
+ * when updating hw_* fields in shared qh/qtd/... structures.
+ */
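+
+/*
+ * Illustrative locking sketch only (the pattern described above, not a
+ * new interface); "flags" is assumed to be a local unsigned long:
+ *
+ *   spin_lock_irqsave(&fotg210->lock, flags);
+ *   ... read/write fotg210->async, a qh's qtd_list, or hw_* fields ...
+ *   spin_unlock_irqrestore(&fotg210->lock, flags);
+ */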
+
+#define FOTG210_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */
+
+/*
+ * fotg210_rh_state values of FOTG210_RH_RUNNING or above mean that the
+ * controller may be doing DMA. Lower values mean there's no DMA.
+ */
+enum fotg210_rh_state {
+ FOTG210_RH_HALTED,
+ FOTG210_RH_SUSPENDED,
+ FOTG210_RH_RUNNING,
+ FOTG210_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * fotg210-hcd.c) in parallel with this list.
+ */
+enum fotg210_hrtimer_event {
+ FOTG210_HRTIMER_POLL_ASS, /* Poll for async schedule off */
+ FOTG210_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
+ FOTG210_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
+ FOTG210_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
+ FOTG210_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ FOTG210_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
+ FOTG210_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
+ FOTG210_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
+ FOTG210_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
+ FOTG210_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
+ FOTG210_HRTIMER_NUM_EVENTS /* Must come last */
+};
+#define FOTG210_HRTIMER_NO_EVENT 99
+
+struct fotg210_hcd { /* one per controller */
+ /* timing support */
+ enum fotg210_hrtimer_event next_hrtimer_event;
+ unsigned enabled_hrtimer_events;
+ ktime_t hr_timeouts[FOTG210_HRTIMER_NUM_EVENTS];
+ struct hrtimer hrtimer;
+
+ int PSS_poll_count;
+ int ASS_poll_count;
+ int died_poll_count;
+
+ /* glue to PCI and HCD framework */
+ struct fotg210_caps __iomem *caps;
+ struct fotg210_regs __iomem *regs;
+ struct fotg210_dbg_port __iomem *debug;
+
+ __u32 hcs_params; /* cached register copy */
+ spinlock_t lock;
+ enum fotg210_rh_state rh_state;
+
+ /* general schedule support */
+ bool scanning:1;
+ bool need_rescan:1;
+ bool intr_unlinking:1;
+ bool async_unlinking:1;
+ bool shutdown:1;
+ struct fotg210_qh *qh_scan_next;
+
+ /* async schedule support */
+ struct fotg210_qh *async;
+ struct fotg210_qh *dummy; /* For AMD quirk use */
+ struct fotg210_qh *async_unlink;
+ struct fotg210_qh *async_unlink_last;
+ struct fotg210_qh *async_iaa;
+ unsigned async_unlink_cycle;
+ unsigned async_count; /* async activity count */
+
+ /* periodic schedule support */
+#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
+ unsigned periodic_size;
+ __hc32 *periodic; /* hw periodic table */
+ dma_addr_t periodic_dma;
+ struct list_head intr_qh_list;
+ unsigned i_thresh; /* uframes HC might cache */
+
+ union fotg210_shadow *pshadow; /* mirror hw periodic table */
+ struct fotg210_qh *intr_unlink;
+ struct fotg210_qh *intr_unlink_last;
+ unsigned intr_unlink_cycle;
+ unsigned now_frame; /* frame from HC hardware */
+ unsigned next_frame; /* scan periodic, start here */
+ unsigned intr_count; /* intr activity count */
+ unsigned isoc_count; /* isoc activity count */
+ unsigned periodic_count; /* periodic activity count */
+ /* max periodic time per uframe */
+ unsigned uframe_periodic_max;
+
+
+ /* list of itds completed while now_frame was still active */
+ struct list_head cached_itd_list;
+ struct fotg210_itd *last_itd_to_free;
+
+ /* per root hub port */
+ unsigned long reset_done[FOTG210_MAX_ROOT_PORTS];
+
+ /* bit vectors (one bit per port) */
+ unsigned long bus_suspended; /* which ports were
+ already suspended at the start of a bus suspend */
+ unsigned long companion_ports; /* which ports are
+ dedicated to the companion controller */
+ unsigned long owned_ports; /* which ports are
+ owned by the companion during a bus suspend */
+ unsigned long port_c_suspend; /* which ports have
+ the change-suspend feature turned on */
+ unsigned long suspended_ports; /* which ports are
+ suspended */
+ unsigned long resuming_ports; /* which ports have
+ started to resume */
+
+ /* per-HC memory pools (could be per-bus, but ...) */
+ struct dma_pool *qh_pool; /* qh per active urb */
+ struct dma_pool *qtd_pool; /* one or more per qh */
+ struct dma_pool *itd_pool; /* itd per iso urb */
+
+ unsigned random_frame;
+ unsigned long next_statechange;
+ ktime_t last_periodic_enable;
+ u32 command;
+
+ /* SILICON QUIRKS */
+ unsigned need_io_watchdog:1;
+ unsigned fs_i_thresh:1; /* Intel iso scheduling */
+
+ u8 sbrn; /* packed release number */
+
+ /* irq statistics */
+#ifdef FOTG210_STATS
+ struct fotg210_stats stats;
+# define COUNT(x) ((x)++)
+#else
+# define COUNT(x)
+#endif
+
+ /* debug files */
+ struct dentry *debug_dir;
+};
+
+/* convert between an HCD pointer and the corresponding FOTG210_HCD */
+static inline struct fotg210_hcd *hcd_to_fotg210(struct usb_hcd *hcd)
+{
+ return (struct fotg210_hcd *)(hcd->hcd_priv);
+}
+static inline struct usb_hcd *fotg210_to_hcd(struct fotg210_hcd *fotg210)
+{
+ return container_of((void *) fotg210, struct usb_hcd, hcd_priv);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct fotg210_caps {
+ /* these fields are specified as 8 and 16 bit registers,
+ * but some hosts can't perform 8 or 16 bit PCI accesses.
+ * some hosts treat caplength and hciversion as parts of a 32-bit
+ * register, others treat them as two separate registers, this
+ * affects the memory map for big endian controllers.
+ */
+ u32 hc_capbase;
+#define HC_LENGTH(fotg210, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
+ (fotg210_big_endian_capbase(fotg210) ? 24 : 0)))
+#define HC_VERSION(fotg210, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
+ (fotg210_big_endian_capbase(fotg210) ? 0 : 16)))
+ u32 hcs_params; /* HCSPARAMS - offset 0x4 */
+#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
+#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
+ u8 portroute[8]; /* nibbles for routing - offset 0xC */
+};
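+
+/*
+ * Capability decode sketch (illustrative only; shows how HC_LENGTH() and
+ * HC_VERSION() are meant to be applied to the hc_capbase register value):
+ *
+ *   u32 capbase = fotg210_readl(fotg210, &fotg210->caps->hc_capbase);
+ *   unsigned int caplength = HC_LENGTH(fotg210, capbase);
+ *   unsigned int hciversion = HC_VERSION(fotg210, capbase);
+ */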
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct fotg210_regs {
+
+ /* USBCMD: offset 0x00 */
+ u32 command;
+
+/* EHCI 1.1 addendum */
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK (1<<11) /* enable "park" on async qh */
+#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
+#define CMD_ASE (1<<5) /* async schedule enable */
+#define CMD_PSE (1<<4) /* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET (1<<1) /* reset HC not bus */
+#define CMD_RUN (1<<0) /* start/stop HC */
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
+#define STS_ASS (1<<15) /* Async Schedule Status */
+#define STS_PSS (1<<14) /* Periodic Schedule Status */
+#define STS_RECL (1<<13) /* Reclamation */
+#define STS_HALT (1<<12) /* Not running (any reason) */
+/* some bits reserved */
+ /* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA (1<<5) /* Interrupted on async advance */
+#define STS_FATAL (1<<4) /* such as some PCI access errors */
+#define STS_FLR (1<<3) /* frame list rolled over */
+#define STS_PCD (1<<2) /* port change detect */
+#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
+#define STS_INT (1<<0) /* "normal" completion (short, ...) */
+
+ /* USBINTR: offset 0x08 */
+ u32 intr_enable;
+
+ /* FRINDEX: offset 0x0C */
+ u32 frame_index; /* current microframe number */
+ /* CTRLDSSEGMENT: offset 0x10 */
+ u32 segment; /* address bits 63:32 if needed */
+ /* PERIODICLISTBASE: offset 0x14 */
+ u32 frame_list; /* points to periodic list */
+ /* ASYNCLISTADDR: offset 0x18 */
+ u32 async_next; /* address of next async queue head */
+
+ u32 reserved1;
+ /* PORTSC: offset 0x20 */
+ u32 port_status;
+/* 31:23 reserved */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+#define PORT_RESET (1<<8) /* reset port */
+#define PORT_SUSPEND (1<<7) /* suspend port */
+#define PORT_RESUME (1<<6) /* resume it */
+#define PORT_PEC (1<<3) /* port enable change */
+#define PORT_PE (1<<2) /* port enable */
+#define PORT_CSC (1<<1) /* connect status change */
+#define PORT_CONNECT (1<<0) /* device connected */
+#define PORT_RWC_BITS (PORT_CSC | PORT_PEC)
+ u32 reserved2[19];
+
+ /* OTGCSR: offset 0x70 */
+ u32 otgcsr;
+#define OTGCSR_HOST_SPD_TYP (3 << 22)
+#define OTGCSR_A_BUS_DROP (1 << 5)
+#define OTGCSR_A_BUS_REQ (1 << 4)
+
+ /* OTGISR: offset 0x74 */
+ u32 otgisr;
+#define OTGISR_OVC (1 << 10)
+
+ u32 reserved3[15];
+
+ /* GMIR: offset 0xB4 */
+ u32 gmir;
+#define GMIR_INT_POLARITY (1 << 3) /*Active High*/
+#define GMIR_MHC_INT (1 << 2)
+#define GMIR_MOTG_INT (1 << 1)
+#define GMIR_MDEV_INT (1 << 0)
+};
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct fotg210_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+};
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+#include <linux/init.h>
+extern int __init early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+struct usb_hcd;
+
+static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return 1; /* Shouldn't this be 0? */
+}
+
+static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from fotg210 host driver to fotg210 debug driver */
+extern int dbgp_external_startup(struct usb_hcd *);
+extern int dbgp_reset_prep(struct usb_hcd *hcd);
+#else
+static inline int dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return xen_dbgp_reset_prep(hcd);
+}
+static inline int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd);
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define QTD_NEXT(fotg210, dma) cpu_to_hc32(fotg210, (u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct fotg210_qtd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.5.1 */
+ __hc32 hw_alt_next; /* see EHCI 3.5.2 */
+ __hc32 hw_token; /* see EHCI 3.5.3 */
+#define QTD_TOGGLE (1 << 31) /* data toggle */
+#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
+#define QTD_IOC (1 << 15) /* interrupt on complete */
+#define QTD_CERR(tok) (((tok)>>10) & 0x3)
+#define QTD_PID(tok) (((tok)>>8) & 0x3)
+#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
+#define QTD_STS_HALT (1 << 6) /* halted on error */
+#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
+#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
+#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
+#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
+#define QTD_STS_STS (1 << 1) /* split transaction state */
+#define QTD_STS_PING (1 << 0) /* issue PING? */
+
+#define ACTIVE_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_ACTIVE)
+#define HALT_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_HALT)
+#define STATUS_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_STS)
+
+ __hc32 hw_buf[5]; /* see EHCI 3.5.4 */
+ __hc32 hw_buf_hi[5]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t qtd_dma; /* qtd address */
+ struct list_head qtd_list; /* sw qtd list */
+ struct urb *urb; /* qtd's urb */
+ size_t length; /* length of buffer */
+} __aligned(32);
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK(fotg210) cpu_to_hc32(fotg210, ~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
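+
+/*
+ * Status-check sketch (illustrative only): QTD_LENGTH() is the byte count
+ * still untransferred, so a non-zero length on an IN token means the
+ * transfer ended short.
+ *
+ *   u32 token = hc32_to_cpu(fotg210, qtd->hw_token);
+ *   if (token & QTD_STS_HALT)
+ *           ... endpoint halted on an error ...
+ *   else if (IS_SHORT_READ(token))
+ *           ... IN transfer finished with QTD_LENGTH(token) bytes left ...
+ */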
+
+/*-------------------------------------------------------------------------*/
+
+/* type tag from {qh,itd,fstn}->hw_next */
+#define Q_NEXT_TYPE(fotg210, dma) ((dma) & cpu_to_hc32(fotg210, 3 << 1))
+
+/*
+ * Now the following defines are not converted using the
+ * cpu_to_le32() macro anymore, since we have to support
+ * "dynamic" switching between be and le support, so that the driver
+ * can be used on one system with SoC EHCI controller using big-endian
+ * descriptors as well as a normal little-endian PCI EHCI controller.
+ */
+/* values for that type tag */
+#define Q_TYPE_ITD (0 << 1)
+#define Q_TYPE_QH (1 << 1)
+#define Q_TYPE_SITD (2 << 1)
+#define Q_TYPE_FSTN (3 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(fotg210, dma) \
+ (cpu_to_hc32(fotg210, (((u32)dma)&~0x01f)|Q_TYPE_QH))
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define FOTG210_LIST_END(fotg210) \
+ cpu_to_hc32(fotg210, 1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure. That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule. Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union fotg210_shadow {
+ struct fotg210_qh *qh; /* Q_TYPE_QH */
+ struct fotg210_itd *itd; /* Q_TYPE_ITD */
+ struct fotg210_fstn *fstn; /* Q_TYPE_FSTN */
+ __hc32 *hw_next; /* (all types) */
+ void *ptr;
+};
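+
+/*
+ * Periodic traversal sketch (illustrative only; "frame" is just a local
+ * index here, the real scan loops live in the scheduling code). The hw
+ * table entry supplies the type tag, the shadow entry supplies the
+ * matching software structure:
+ *
+ *   union fotg210_shadow here = fotg210->pshadow[frame];
+ *   __hc32 type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);
+ *   if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
+ *           ... follow here.qh and here.qh->qh_next ...
+ *   else if (type == cpu_to_hc32(fotg210, Q_TYPE_ITD))
+ *           ... follow here.itd and here.itd->itd_next ...
+ */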
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+/* first part defined by EHCI spec */
+struct fotg210_qh_hw {
+ __hc32 hw_next; /* see EHCI 3.6.1 */
+ __hc32 hw_info1; /* see EHCI 3.6.2 */
+#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
+#define QH_HEAD (1 << 15) /* Head of async reclamation list */
+#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
+#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
+#define QH_LOW_SPEED (1 << 12)
+#define QH_FULL_SPEED (0 << 12)
+#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
+ __hc32 hw_info2; /* see EHCI 3.6.2 */
+#define QH_SMASK 0x000000ff
+#define QH_CMASK 0x0000ff00
+#define QH_HUBADDR 0x007f0000
+#define QH_HUBPORT 0x3f800000
+#define QH_MULT 0xc0000000
+ __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
+
+ /* qtd overlay (hardware parts of a struct fotg210_qtd) */
+ __hc32 hw_qtd_next;
+ __hc32 hw_alt_next;
+ __hc32 hw_token;
+ __hc32 hw_buf[5];
+ __hc32 hw_buf_hi[5];
+} __aligned(32);
+
+struct fotg210_qh {
+ struct fotg210_qh_hw *hw; /* Must come first */
+ /* the rest is HCD-private */
+ dma_addr_t qh_dma; /* address of qh */
+ union fotg210_shadow qh_next; /* ptr to qh; or periodic */
+ struct list_head qtd_list; /* sw qtd list */
+ struct list_head intr_node; /* list of intr QHs */
+ struct fotg210_qtd *dummy;
+ struct fotg210_qh *unlink_next; /* next on unlink list */
+
+ unsigned unlink_cycle;
+
+ u8 needs_rescan; /* Dequeue during giveback */
+ u8 qh_state;
+#define QH_STATE_LINKED 1 /* HC sees this */
+#define QH_STATE_UNLINK 2 /* HC may still see this */
+#define QH_STATE_IDLE 3 /* HC doesn't see this */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
+#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
+
+ u8 xacterrs; /* XactErr retry counter */
+#define QH_XACTERR_MAX 32 /* XactErr retry limit */
+
+ /* periodic schedule info */
+ u8 usecs; /* intr bandwidth */
+ u8 gap_uf; /* uframes split/csplit gap */
+ u8 c_usecs; /* ... split completion bw */
+ u16 tt_usecs; /* tt downstream bandwidth */
+ unsigned short period; /* polling interval */
+ unsigned short start; /* where polling starts */
+#define NO_FRAME ((unsigned short)~0) /* pick new start */
+
+ struct usb_device *dev; /* access to TT */
+ unsigned is_out:1; /* bulk or intr OUT */
+ unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* description of one iso transaction (up to 3 KB data if highspeed) */
+struct fotg210_iso_packet {
+ /* These will be copied to iTD when scheduling */
+ u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
+ __hc32 transaction; /* itd->hw_transaction[i] |= */
+ u8 cross; /* buf crosses pages */
+ /* for full speed OUT splits */
+ u32 buf1;
+};
+
+/* temporary schedule data for packets from iso urbs (both speeds)
+ * each packet is one logical usb transaction to the device (not TT),
+ * beginning at stream->next_uframe
+ */
+struct fotg210_iso_sched {
+ struct list_head td_list;
+ unsigned span;
+ struct fotg210_iso_packet packet[0];
+};
+
+/*
+ * fotg210_iso_stream - groups all (s)itds for this endpoint.
+ * acts like a qh would, if EHCI had them for ISO.
+ */
+struct fotg210_iso_stream {
+ /* first field matches fotg210_qh, but is NULL */
+ struct fotg210_qh_hw *hw;
+
+ u8 bEndpointAddress;
+ u8 highspeed;
+ struct list_head td_list; /* queued itds */
+ struct list_head free_list; /* list of unused itds */
+ struct usb_device *udev;
+ struct usb_host_endpoint *ep;
+
+ /* output of (re)scheduling */
+ int next_uframe;
+ __hc32 splits;
+
+ /* the rest is derived from the endpoint descriptor,
+ * trusting urb->interval == f(epdesc->bInterval) and
+ * including the extra info for hw_bufp[0..2]
+ */
+ u8 usecs, c_usecs;
+ u16 interval;
+ u16 tt_usecs;
+ u16 maxp;
+ u16 raw_mask;
+ unsigned bandwidth;
+
+ /* This is used to initialize iTD's hw_bufp fields */
+ __hc32 buf0;
+ __hc32 buf1;
+ __hc32 buf2;
+
+ /* this is used to initialize sITD's tt info */
+ __hc32 address;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.3
+ * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
+ *
+ * Schedule records for high speed iso xfers
+ */
+struct fotg210_itd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.3.1 */
+ __hc32 hw_transaction[8]; /* see EHCI 3.3.2 */
+#define FOTG210_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
+#define FOTG210_ISOC_BUF_ERR (1<<30) /* Data buffer error */
+#define FOTG210_ISOC_BABBLE (1<<29) /* babble detected */
+#define FOTG210_ISOC_XACTERR (1<<28) /* XactErr - transaction error */
+#define FOTG210_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
+#define FOTG210_ITD_IOC (1 << 15) /* interrupt on complete */
+
+#define ITD_ACTIVE(fotg210) cpu_to_hc32(fotg210, FOTG210_ISOC_ACTIVE)
+
+ __hc32 hw_bufp[7]; /* see EHCI 3.3.3 */
+ __hc32 hw_bufp_hi[7]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t itd_dma; /* for this itd */
+ union fotg210_shadow itd_next; /* ptr to periodic q entry */
+
+ struct urb *urb;
+ struct fotg210_iso_stream *stream; /* endpoint's queue */
+ struct list_head itd_list; /* list of stream's itds */
+
+ /* any/all hw_transactions here may be used by that urb */
+ unsigned frame; /* where scheduled */
+ unsigned pg;
+ unsigned index[8]; /* in urb->iso_frame_desc */
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.96 Section 3.7
+ * Periodic Frame Span Traversal Node (FSTN)
+ *
+ * Manages split interrupt transactions (using TT) that span frame boundaries
+ * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN
+ * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
+ * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
+ */
+struct fotg210_fstn {
+ __hc32 hw_next; /* any periodic q entry */
+ __hc32 hw_prev; /* qh or FOTG210_LIST_END */
+
+ /* the rest is HCD-private */
+ dma_addr_t fstn_dma;
+ union fotg210_shadow fstn_next; /* ptr to periodic q entry */
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \
+ fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup);
+
+#define fotg210_prepare_ports_for_controller_resume(fotg210) \
+ fotg210_adjust_port_wakeup_flags(fotg210, false, false);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Some EHCI controllers have a Transaction Translator built into the
+ * root hub. This is a non-standard feature. Each controller will need
+ * to add code to the following inline functions, and call them as
+ * needed (mostly in root hub code).
+ */
+
+static inline unsigned int
+fotg210_get_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
+{
+ return (readl(&fotg210->regs->otgcsr)
+ & OTGCSR_HOST_SPD_TYP) >> 22;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int
+fotg210_port_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
+{
+ switch (fotg210_get_speed(fotg210, portsc)) {
+ case 0:
+ return 0;
+ case 1:
+ return USB_PORT_STAT_LOW_SPEED;
+ case 2:
+ default:
+ return USB_PORT_STAT_HIGH_SPEED;
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define fotg210_has_fsl_portno_bug(e) (0)
+
+/*
+ * While most USB host controllers implement their registers in
+ * little-endian format, a minority (celleb companion chip) implement
+ * them in big endian format.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ *
+ */
+
+#define fotg210_big_endian_mmio(e) 0
+#define fotg210_big_endian_capbase(e) 0
+
+static inline unsigned int fotg210_readl(const struct fotg210_hcd *fotg210,
+ __u32 __iomem *regs)
+{
+ return readl(regs);
+}
+
+static inline void fotg210_writel(const struct fotg210_hcd *fotg210,
+ const unsigned int val, __u32 __iomem *regs)
+{
+ writel(val, regs);
+}
+
+/* cpu to fotg210 */
+static inline __hc32 cpu_to_hc32(const struct fotg210_hcd *fotg210, const u32 x)
+{
+ return cpu_to_le32(x);
+}
+
+/* fotg210 to cpu */
+static inline u32 hc32_to_cpu(const struct fotg210_hcd *fotg210, const __hc32 x)
+{
+ return le32_to_cpu(x);
+}
+
+static inline u32 hc32_to_cpup(const struct fotg210_hcd *fotg210,
+ const __hc32 *x)
+{
+ return le32_to_cpup(x);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
+{
+ return fotg210_readl(fotg210, &fotg210->regs->frame_index);
+}
+
+#define fotg210_itdlen(urb, desc, t) ({ \
+ usb_pipein((urb)->pipe) ? \
+ (desc)->length - FOTG210_ITD_LENGTH(t) : \
+ FOTG210_ITD_LENGTH(t); \
+})
+/*-------------------------------------------------------------------------*/
+
+#endif /* __LINUX_FOTG210_H */
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
new file mode 100644
index 00000000000..9162d1b6c0a
--- /dev/null
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -0,0 +1,340 @@
+/*
+ * Setup platform devices needed by the Freescale multi-port host
+ * and/or dual-role USB controller modules based on the description
+ * in flat device tree.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
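+
+/*
+ * Illustrative device tree fragment (example values only, not a binding
+ * document) showing the properties this driver consumes:
+ *
+ *   usb@22000 {
+ *           compatible = "fsl-usb2-dr-v2.2", "fsl-usb2-dr";
+ *           dr_mode = "host";
+ *           phy_type = "ulpi";
+ *   };
+ */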
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+
+struct fsl_usb2_dev_data {
+ char *dr_mode; /* controller mode */
+ char *drivers[3]; /* drivers to instantiate for this mode */
+ enum fsl_usb2_operating_modes op_mode; /* operating mode */
+};
+
+static struct fsl_usb2_dev_data dr_mode_data[] = {
+ {
+ .dr_mode = "host",
+ .drivers = { "fsl-ehci", NULL, NULL, },
+ .op_mode = FSL_USB2_DR_HOST,
+ },
+ {
+ .dr_mode = "otg",
+ .drivers = { "fsl-usb2-otg", "fsl-ehci", "fsl-usb2-udc", },
+ .op_mode = FSL_USB2_DR_OTG,
+ },
+ {
+ .dr_mode = "peripheral",
+ .drivers = { "fsl-usb2-udc", NULL, NULL, },
+ .op_mode = FSL_USB2_DR_DEVICE,
+ },
+};
+
+static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
+{
+ const unsigned char *prop;
+ int i;
+
+ prop = of_get_property(np, "dr_mode", NULL);
+ if (prop) {
+ for (i = 0; i < ARRAY_SIZE(dr_mode_data); i++) {
+ if (!strcmp(prop, dr_mode_data[i].dr_mode))
+ return &dr_mode_data[i];
+ }
+ }
+ pr_warn("%s: Invalid 'dr_mode' property, fallback to host mode\n",
+ np->full_name);
+ return &dr_mode_data[0]; /* mode not specified, use host */
+}
+
+static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
+{
+ if (!phy_type)
+ return FSL_USB2_PHY_NONE;
+ if (!strcasecmp(phy_type, "ulpi"))
+ return FSL_USB2_PHY_ULPI;
+ if (!strcasecmp(phy_type, "utmi"))
+ return FSL_USB2_PHY_UTMI;
+ if (!strcasecmp(phy_type, "utmi_wide"))
+ return FSL_USB2_PHY_UTMI_WIDE;
+ if (!strcasecmp(phy_type, "serial"))
+ return FSL_USB2_PHY_SERIAL;
+
+ return FSL_USB2_PHY_NONE;
+}
+
+static struct platform_device *fsl_usb2_device_register(
+ struct platform_device *ofdev,
+ struct fsl_usb2_platform_data *pdata,
+ const char *name, int id)
+{
+ struct platform_device *pdev;
+ const struct resource *res = ofdev->resource;
+ unsigned int num = ofdev->num_resources;
+ int retval;
+
+ pdev = platform_device_alloc(name, id);
+ if (!pdev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ pdev->dev.parent = &ofdev->dev;
+
+ pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
+ *pdev->dev.dma_mask = *ofdev->dev.dma_mask;
+
+ retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
+ if (retval)
+ goto error;
+
+ if (num) {
+ retval = platform_device_add_resources(pdev, res, num);
+ if (retval)
+ goto error;
+ }
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto error;
+
+ return pdev;
+
+error:
+ platform_device_put(pdev);
+ return ERR_PTR(retval);
+}
+
+static const struct of_device_id fsl_usb2_mph_dr_of_match[];
+
+static int usb_get_ver_info(struct device_node *np)
+{
+ int ver = -1;
+
+ /*
+ * Returns FSL_USB_VER_1_6 for controller version 1.6,
+ * FSL_USB_VER_2_2 for version 2.2, FSL_USB_VER_2_4 for version 2.4,
+ * FSL_USB_VER_OLD for other recognized controllers, and -1 if the
+ * version cannot be determined.
+ */
+ if (of_device_is_compatible(np, "fsl-usb2-dr")) {
+ if (of_device_is_compatible(np, "fsl-usb2-dr-v1.6"))
+ ver = FSL_USB_VER_1_6;
+ else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.2"))
+ ver = FSL_USB_VER_2_2;
+ else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.4"))
+ ver = FSL_USB_VER_2_4;
+ else /* for previous controller versions */
+ ver = FSL_USB_VER_OLD;
+
+ if (ver > -1)
+ return ver;
+ }
+
+ if (of_device_is_compatible(np, "fsl,mpc5121-usb2-dr"))
+ return FSL_USB_VER_OLD;
+
+ if (of_device_is_compatible(np, "fsl-usb2-mph")) {
+ if (of_device_is_compatible(np, "fsl-usb2-mph-v1.6"))
+ ver = FSL_USB_VER_1_6;
+ else if (of_device_is_compatible(np, "fsl-usb2-mph-v2.2"))
+ ver = FSL_USB_VER_2_2;
+ else /* for previous controller versions */
+ ver = FSL_USB_VER_OLD;
+ }
+
+ return ver;
+}
+
+static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ struct platform_device *usb_dev;
+ struct fsl_usb2_platform_data data, *pdata;
+ struct fsl_usb2_dev_data *dev_data;
+ const struct of_device_id *match;
+ const unsigned char *prop;
+ static unsigned int idx;
+ int i;
+
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
+ match = of_match_device(fsl_usb2_mph_dr_of_match, &ofdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ pdata = &data;
+ if (match->data)
+ memcpy(pdata, match->data, sizeof(data));
+ else
+ memset(pdata, 0, sizeof(data));
+
+ dev_data = get_dr_mode_data(np);
+
+ if (of_device_is_compatible(np, "fsl-usb2-mph")) {
+ if (of_get_property(np, "port0", NULL))
+ pdata->port_enables |= FSL_USB2_PORT0_ENABLED;
+
+ if (of_get_property(np, "port1", NULL))
+ pdata->port_enables |= FSL_USB2_PORT1_ENABLED;
+
+ pdata->operating_mode = FSL_USB2_MPH_HOST;
+ } else {
+ if (of_get_property(np, "fsl,invert-drvvbus", NULL))
+ pdata->invert_drvvbus = 1;
+
+ if (of_get_property(np, "fsl,invert-pwr-fault", NULL))
+ pdata->invert_pwr_fault = 1;
+
+ /* setup mode selected in the device tree */
+ pdata->operating_mode = dev_data->op_mode;
+ }
+
+ prop = of_get_property(np, "phy_type", NULL);
+ pdata->phy_mode = determine_usb_phy(prop);
+ pdata->controller_ver = usb_get_ver_info(np);
+
+ if (pdata->have_sysif_regs) {
+ if (pdata->controller_ver < 0) {
+ dev_warn(&ofdev->dev, "Could not get controller version\n");
+ return -ENODEV;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev_data->drivers); i++) {
+ if (!dev_data->drivers[i])
+ continue;
+ usb_dev = fsl_usb2_device_register(ofdev, pdata,
+ dev_data->drivers[i], idx);
+ if (IS_ERR(usb_dev)) {
+ dev_err(&ofdev->dev, "Can't register usb device\n");
+ return PTR_ERR(usb_dev);
+ }
+ }
+ idx++;
+ return 0;
+}
+
+static int __unregister_subdev(struct device *dev, void *d)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
+{
+ device_for_each_child(&ofdev->dev, NULL, __unregister_subdev);
+ return 0;
+}
+
+#ifdef CONFIG_PPC_MPC512x
+
+#define USBGENCTRL 0x200 /* NOTE: big endian */
+#define GC_WU_INT_CLR (1 << 5) /* Wakeup int clear */
+#define GC_ULPI_SEL (1 << 4) /* ULPI i/f select (usb0 only)*/
+#define GC_PPP (1 << 3) /* Inv. Port Power Polarity */
+#define GC_PFP (1 << 2) /* Inv. Power Fault Polarity */
+#define GC_WU_ULPI_EN (1 << 1) /* Wakeup on ULPI event */
+#define GC_WU_IE (1 << 1) /* Wakeup interrupt enable */
+
+#define ISIPHYCTRL 0x204 /* NOTE: big endian */
+#define PHYCTRL_PHYE (1 << 4) /* On-chip UTMI PHY enable */
+#define PHYCTRL_BSENH (1 << 3) /* Bit Stuff Enable High */
+#define PHYCTRL_BSEN (1 << 2) /* Bit Stuff Enable */
+#define PHYCTRL_LSFE (1 << 1) /* Line State Filter Enable */
+#define PHYCTRL_PXE (1 << 0) /* PHY oscillator enable */
+
+int fsl_usb2_mpc5121_init(struct platform_device *pdev)
+{
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct clk *clk;
+ int err;
+
+ clk = devm_clk_get(pdev->dev.parent, "ipg");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clk\n");
+ return PTR_ERR(clk);
+ }
+ err = clk_prepare_enable(clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable clk\n");
+ return err;
+ }
+ pdata->clk = clk;
+
+ if (pdata->phy_mode == FSL_USB2_PHY_UTMI_WIDE) {
+ u32 reg = 0;
+
+ if (pdata->invert_drvvbus)
+ reg |= GC_PPP;
+
+ if (pdata->invert_pwr_fault)
+ reg |= GC_PFP;
+
+ out_be32(pdata->regs + ISIPHYCTRL, PHYCTRL_PHYE | PHYCTRL_PXE);
+ out_be32(pdata->regs + USBGENCTRL, reg);
+ }
+ return 0;
+}
+
+static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
+{
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ pdata->regs = NULL;
+
+ if (pdata->clk)
+ clk_disable_unprepare(pdata->clk);
+}
+
+static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
+ .big_endian_desc = 1,
+ .big_endian_mmio = 1,
+ .es = 1,
+ .have_sysif_regs = 0,
+ .le_setup_buf = 1,
+ .init = fsl_usb2_mpc5121_init,
+ .exit = fsl_usb2_mpc5121_exit,
+};
+#endif /* CONFIG_PPC_MPC512x */
+
+static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
+ .have_sysif_regs = 1,
+};
+
+static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
+ { .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
+ { .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
+#ifdef CONFIG_PPC_MPC512x
+ { .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
+#endif
+ {},
+};
+
+static struct platform_driver fsl_usb2_mph_dr_driver = {
+ .driver = {
+ .name = "fsl-usb2-mph-dr",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_usb2_mph_dr_of_match,
+ },
+ .probe = fsl_usb2_mph_dr_of_probe,
+ .remove = fsl_usb2_mph_dr_of_remove,
+};
+
+module_platform_driver(fsl_usb2_mph_dr_driver);
+
+MODULE_DESCRIPTION("FSL MPH DR OF devices driver");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
new file mode 100644
index 00000000000..ba9499060f6
--- /dev/null
+++ b/drivers/usb/host/fusbh200-hcd.c
@@ -0,0 +1,5894 @@
+/*
+ * Faraday FUSBH200 EHCI-like driver
+ *
+ * Copyright (c) 2013 Faraday Technology Corporation
+ *
+ * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ * Feng-Hsin Chiang <john453@faraday-tech.com>
+ * Po-Yu Chuang <ratbert.chuang@gmail.com>
+ *
+ * Most of the code is borrowed from the Linux 3.7 EHCI driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+/*-------------------------------------------------------------------------*/
+#define DRIVER_AUTHOR "Yuan-Hsin Chen"
+#define DRIVER_DESC "FUSBH200 Host Controller (EHCI) Driver"
+
+static const char hcd_name [] = "fusbh200_hcd";
+
+#undef FUSBH200_URB_TRACE
+
+/* magic numbers that can affect system performance */
+#define FUSBH200_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define FUSBH200_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define FUSBH200_TUNE_RL_TT 0
+#define FUSBH200_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define FUSBH200_TUNE_MULT_TT 1
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define FUSBH200_TUNE_FLS 1 /* (medium) 512-frame schedule */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh = 0; // 0 to 6
+module_param (log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting: slower than hw default */
+static unsigned park = 0;
+module_param (park, uint, S_IRUGO);
+MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
+
+/* for link power management(LPM) feature */
+static unsigned int hird;
+module_param(hird, int, S_IRUGO);
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
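+
+/*
+ * Illustrative only: the tunables above are read-only module parameters,
+ * so they are given at load time, e.g.
+ *
+ *   modprobe fusbh200-hcd log2_irq_thresh=3 park=2
+ *
+ * (arbitrary example values, not recommendations)
+ */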
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+#include "fusbh200.h"
+
+/*-------------------------------------------------------------------------*/
+
+#define fusbh200_dbg(fusbh200, fmt, args...) \
+ dev_dbg (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+#define fusbh200_err(fusbh200, fmt, args...) \
+ dev_err (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+#define fusbh200_info(fusbh200, fmt, args...) \
+ dev_info (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+#define fusbh200_warn(fusbh200, fmt, args...) \
+ dev_warn (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+
+/* check the values in the HCSPARAMS register
+ * (host controller _Structural_ parameters)
+ * see EHCI spec, Table 2-4 for each value
+ */
+static void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label)
+{
+ u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
+
+ fusbh200_dbg (fusbh200,
+ "%s hcs_params 0x%x ports=%d\n",
+ label, params,
+ HCS_N_PORTS (params)
+ );
+}
+
+/* check the values in the HCCPARAMS register
+ * (host controller _Capability_ parameters)
+ * see EHCI Spec, Table 2-5 for each value
+ */
+static void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label)
+{
+ u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+
+ fusbh200_dbg (fusbh200,
+ "%s hcc_params %04x uframes %s%s\n",
+ label,
+ params,
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "");
+}
+
+static void __maybe_unused
+dbg_qtd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
+{
+ fusbh200_dbg(fusbh200, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+ hc32_to_cpup(fusbh200, &qtd->hw_next),
+ hc32_to_cpup(fusbh200, &qtd->hw_alt_next),
+ hc32_to_cpup(fusbh200, &qtd->hw_token),
+ hc32_to_cpup(fusbh200, &qtd->hw_buf [0]));
+ if (qtd->hw_buf [1])
+ fusbh200_dbg(fusbh200, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
+ hc32_to_cpup(fusbh200, &qtd->hw_buf[1]),
+ hc32_to_cpup(fusbh200, &qtd->hw_buf[2]),
+ hc32_to_cpup(fusbh200, &qtd->hw_buf[3]),
+ hc32_to_cpup(fusbh200, &qtd->hw_buf[4]));
+}
+
+static void __maybe_unused
+dbg_qh (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ fusbh200_dbg (fusbh200, "%s qh %p n%08x info %x %x qtd %x\n", label,
+ qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+ dbg_qtd("overlay", fusbh200, (struct fusbh200_qtd *) &hw->hw_qtd_next);
+}
+
+static void __maybe_unused
+dbg_itd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
+{
+ fusbh200_dbg (fusbh200, "%s [%d] itd %p, next %08x, urb %p\n",
+ label, itd->frame, itd, hc32_to_cpu(fusbh200, itd->hw_next),
+ itd->urb);
+ fusbh200_dbg (fusbh200,
+ " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fusbh200, itd->hw_transaction[0]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[1]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[2]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[3]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[4]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[5]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[6]),
+ hc32_to_cpu(fusbh200, itd->hw_transaction[7]));
+ fusbh200_dbg (fusbh200,
+ " buf: %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fusbh200, itd->hw_bufp[0]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[1]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[2]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[3]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[4]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[5]),
+ hc32_to_cpu(fusbh200, itd->hw_bufp[6]));
+ fusbh200_dbg (fusbh200, " index: %d %d %d %d %d %d %d %d\n",
+ itd->index[0], itd->index[1], itd->index[2],
+ itd->index[3], itd->index[4], itd->index[5],
+ itd->index[6], itd->index[7]);
+}
+
+static int __maybe_unused
+dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
+{
+ return scnprintf (buf, len,
+ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", status,
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+ (status & STS_HALT) ? " Halt" : "",
+ (status & STS_IAA) ? " IAA" : "",
+ (status & STS_FATAL) ? " FATAL" : "",
+ (status & STS_FLR) ? " FLR" : "",
+ (status & STS_PCD) ? " PCD" : "",
+ (status & STS_ERR) ? " ERR" : "",
+ (status & STS_INT) ? " INT" : ""
+ );
+}
+
+static int __maybe_unused
+dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
+{
+ return scnprintf (buf, len,
+ "%s%sintrenable %02x%s%s%s%s%s%s",
+ label, label [0] ? " " : "", enable,
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+ (enable & STS_PCD) ? " PCD" : "",
+ (enable & STS_ERR) ? " ERR" : "",
+ (enable & STS_INT) ? " INT" : ""
+ );
+}
+
+static const char *const fls_strings [] =
+ { "1024", "512", "256", "??" };
+
+static int
+dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
+{
+ return scnprintf (buf, len,
+ "%s%scommand %07x %s=%d ithresh=%d%s%s%s "
+ "period=%s%s %s",
+ label, label [0] ? " " : "", command,
+ (command & CMD_PARK) ? " park" : "(park)",
+ CMD_PARK_CNT (command),
+ (command >> 16) & 0x3f,
+ (command & CMD_IAAD) ? " IAAD" : "",
+ (command & CMD_ASE) ? " Async" : "",
+ (command & CMD_PSE) ? " Periodic" : "",
+ fls_strings [(command >> 2) & 0x3],
+ (command & CMD_RESET) ? " Reset" : "",
+ (command & CMD_RUN) ? "RUN" : "HALT"
+ );
+}
+
+static int
+dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
+{
+ char *sig;
+
+ /* signaling state */
+ switch (status & (3 << 10)) {
+ case 0 << 10: sig = "se0"; break;
+ case 1 << 10: sig = "k"; break; /* low speed */
+ case 2 << 10: sig = "j"; break;
+ default: sig = "?"; break;
+ }
+
+ return scnprintf (buf, len,
+ "%s%sport:%d status %06x %d "
+ "sig=%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", port, status,
+ status>>25,/*device address */
+ sig,
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+ (status & PORT_PEC) ? " PEC" : "",
+ (status & PORT_PE) ? " PE" : "",
+ (status & PORT_CSC) ? " CSC" : "",
+ (status & PORT_CONNECT) ? " CONNECT" : "");
+}
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(fusbh200, label, status) { \
+ char _buf [80]; \
+ dbg_status_buf (_buf, sizeof _buf, label, status); \
+ fusbh200_dbg (fusbh200, "%s\n", _buf); \
+}
+
+#define dbg_cmd(fusbh200, label, command) { \
+ char _buf [80]; \
+ dbg_command_buf (_buf, sizeof _buf, label, command); \
+ fusbh200_dbg (fusbh200, "%s\n", _buf); \
+}
+
+#define dbg_port(fusbh200, label, port, status) { \
+ char _buf [80]; \
+ dbg_port_buf (_buf, sizeof _buf, label, port, status); \
+ fusbh200_dbg (fusbh200, "%s\n", _buf); \
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* troubleshooting help: expose state in debugfs */
+
+static int debug_async_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+
+static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_async_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_periodic_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_periodic_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_registers_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_registers_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+
+static struct dentry *fusbh200_debug_root;
+
+struct debug_buffer {
+ ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
+ struct usb_bus *bus;
+ struct mutex mutex; /* protect filling of buffer */
+ size_t count; /* number of characters filled into buffer */
+ char *output_buf;
+ size_t alloc_size;
+};
+
+#define speed_char(info1) ({ char tmp; \
+ switch (info1 & (3 << 12)) { \
+ case QH_FULL_SPEED: tmp = 'f'; break; \
+ case QH_LOW_SPEED: tmp = 'l'; break; \
+ case QH_HIGH_SPEED: tmp = 'h'; break; \
+ default: tmp = '?'; break; \
+ } tmp; })
+
+static inline char token_mark(struct fusbh200_hcd *fusbh200, __hc32 token)
+{
+ __u32 v = hc32_to_cpu(fusbh200, token);
+
+ if (v & QTD_STS_ACTIVE)
+ return '*';
+ if (v & QTD_STS_HALT)
+ return '-';
+ if (!IS_SHORT_READ (v))
+ return ' ';
+ /* tries to advance through hw_alt_next */
+ return '/';
+}
+
+static void qh_lines (
+ struct fusbh200_hcd *fusbh200,
+ struct fusbh200_qh *qh,
+ char **nextp,
+ unsigned *sizep
+)
+{
+ u32 scratch;
+ u32 hw_curr;
+ struct fusbh200_qtd *td;
+ unsigned temp;
+ unsigned size = *sizep;
+ char *next = *nextp;
+ char mark;
+ __le32 list_end = FUSBH200_LIST_END(fusbh200);
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ if (hw->hw_qtd_next == list_end) /* NEC does this */
+ mark = '@';
+ else
+ mark = token_mark(fusbh200, hw->hw_token);
+ if (mark == '/') { /* qh_alt_next controls qh advance? */
+ if ((hw->hw_alt_next & QTD_MASK(fusbh200))
+ == fusbh200->async->hw->hw_alt_next)
+ mark = '#'; /* blocked */
+ else if (hw->hw_alt_next == list_end)
+ mark = '.'; /* use hw_qtd_next */
+ /* else alt_next points to some other qtd */
+ }
+ scratch = hc32_to_cpup(fusbh200, &hw->hw_info1);
+ hw_curr = (mark == '*') ? hc32_to_cpup(fusbh200, &hw->hw_current) : 0;
+ temp = scnprintf (next, size,
+ "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
+ qh, scratch & 0x007f,
+ speed_char (scratch),
+ (scratch >> 8) & 0x000f,
+ scratch, hc32_to_cpup(fusbh200, &hw->hw_info2),
+ hc32_to_cpup(fusbh200, &hw->hw_token), mark,
+ (cpu_to_hc32(fusbh200, QTD_TOGGLE) & hw->hw_token)
+ ? "data1" : "data0",
+ (hc32_to_cpup(fusbh200, &hw->hw_alt_next) >> 1) & 0x0f);
+ size -= temp;
+ next += temp;
+
+ /* hc may be modifying the list as we read it ... */
+ list_for_each_entry(td, &qh->qtd_list, qtd_list) {
+ scratch = hc32_to_cpup(fusbh200, &td->hw_token);
+ mark = ' ';
+ if (hw_curr == td->qtd_dma)
+ mark = '*';
+ else if (hw->hw_qtd_next == cpu_to_hc32(fusbh200, td->qtd_dma))
+ mark = '+';
+ else if (QTD_LENGTH (scratch)) {
+ if (td->hw_alt_next == fusbh200->async->hw->hw_alt_next)
+ mark = '#';
+ else if (td->hw_alt_next != list_end)
+ mark = '/';
+ }
+ temp = snprintf (next, size,
+ "\n\t%p%c%s len=%d %08x urb %p",
+ td, mark, ({ char *tmp;
+ switch ((scratch>>8)&0x03) {
+ case 0: tmp = "out"; break;
+ case 1: tmp = "in"; break;
+ case 2: tmp = "setup"; break;
+ default: tmp = "?"; break;
+ } tmp;}),
+ (scratch >> 16) & 0x7fff,
+ scratch,
+ td->urb);
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+ if (temp == size)
+ goto done;
+ }
+
+ temp = snprintf (next, size, "\n");
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+
+done:
+ *sizep = size;
+ *nextp = next;
+}
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fusbh200_hcd *fusbh200;
+ unsigned long flags;
+ unsigned temp, size;
+ char *next;
+ struct fusbh200_qh *qh;
+
+ hcd = bus_to_hcd(buf->bus);
+ fusbh200 = hcd_to_fusbh200 (hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ *next = 0;
+
+ /* dumps a snapshot of the async schedule.
+ * usually empty except for long-term bulk reads, or head.
+ * one QH per line, and TDs we know about
+ */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ for (qh = fusbh200->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
+ qh_lines (fusbh200, qh, &next, &size);
+ if (fusbh200->async_unlink && size > 0) {
+ temp = scnprintf(next, size, "\nunlink =\n");
+ size -= temp;
+ next += temp;
+
+ for (qh = fusbh200->async_unlink; size > 0 && qh;
+ qh = qh->unlink_next)
+ qh_lines (fusbh200, qh, &next, &size);
+ }
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+
+ return strlen(buf->output_buf);
+}
+
+#define DBG_SCHED_LIMIT 64
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fusbh200_hcd *fusbh200;
+ unsigned long flags;
+ union fusbh200_shadow p, *seen;
+ unsigned temp, size, seen_count;
+ char *next;
+ unsigned i;
+ __hc32 tag;
+
+ if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
+ return 0;
+ seen_count = 0;
+
+ hcd = bus_to_hcd(buf->bus);
+ fusbh200 = hcd_to_fusbh200 (hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ temp = scnprintf (next, size, "size = %d\n", fusbh200->periodic_size);
+ size -= temp;
+ next += temp;
+
+ /* dump a snapshot of the periodic schedule.
+ * iso changes, interrupt usually doesn't.
+ */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ for (i = 0; i < fusbh200->periodic_size; i++) {
+ p = fusbh200->pshadow [i];
+ if (likely (!p.ptr))
+ continue;
+ tag = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [i]);
+
+ temp = scnprintf (next, size, "%4d: ", i);
+ size -= temp;
+ next += temp;
+
+ do {
+ struct fusbh200_qh_hw *hw;
+
+ switch (hc32_to_cpu(fusbh200, tag)) {
+ case Q_TYPE_QH:
+ hw = p.qh->hw;
+ temp = scnprintf (next, size, " qh%d-%04x/%p",
+ p.qh->period,
+ hc32_to_cpup(fusbh200,
+ &hw->hw_info2)
+ /* uframe masks */
+ & (QH_CMASK | QH_SMASK),
+ p.qh);
+ size -= temp;
+ next += temp;
+ /* don't repeat what follows this qh */
+ for (temp = 0; temp < seen_count; temp++) {
+ if (seen [temp].ptr != p.ptr)
+ continue;
+ if (p.qh->qh_next.ptr) {
+ temp = scnprintf (next, size,
+ " ...");
+ size -= temp;
+ next += temp;
+ }
+ break;
+ }
+ /* show more info the first time around */
+ if (temp == seen_count) {
+ u32 scratch = hc32_to_cpup(fusbh200,
+ &hw->hw_info1);
+ struct fusbh200_qtd *qtd;
+ char *type = "";
+
+ /* count tds, get ep direction */
+ temp = 0;
+ list_for_each_entry (qtd,
+ &p.qh->qtd_list,
+ qtd_list) {
+ temp++;
+ switch (0x03 & (hc32_to_cpu(
+ fusbh200,
+ qtd->hw_token) >> 8)) {
+ case 0: type = "out"; continue;
+ case 1: type = "in"; continue;
+ }
+ }
+
+ temp = scnprintf (next, size,
+ " (%c%d ep%d%s "
+ "[%d/%d] q%d p%d)",
+ speed_char (scratch),
+ scratch & 0x007f,
+ (scratch >> 8) & 0x000f, type,
+ p.qh->usecs, p.qh->c_usecs,
+ temp,
+ 0x7ff & (scratch >> 16));
+
+ if (seen_count < DBG_SCHED_LIMIT)
+ seen [seen_count++].qh = p.qh;
+ } else
+ temp = 0;
+ tag = Q_NEXT_TYPE(fusbh200, hw->hw_next);
+ p = p.qh->qh_next;
+ break;
+ case Q_TYPE_FSTN:
+ temp = scnprintf (next, size,
+ " fstn-%8x/%p", p.fstn->hw_prev,
+ p.fstn);
+ tag = Q_NEXT_TYPE(fusbh200, p.fstn->hw_next);
+ p = p.fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ temp = scnprintf (next, size,
+ " itd/%p", p.itd);
+ tag = Q_NEXT_TYPE(fusbh200, p.itd->hw_next);
+ p = p.itd->itd_next;
+ break;
+ }
+ size -= temp;
+ next += temp;
+ } while (p.ptr);
+
+ temp = scnprintf (next, size, "\n");
+ size -= temp;
+ next += temp;
+ }
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ kfree (seen);
+
+ return buf->alloc_size - size;
+}
+#undef DBG_SCHED_LIMIT
+
+static const char *rh_state_string(struct fusbh200_hcd *fusbh200)
+{
+ switch (fusbh200->rh_state) {
+ case FUSBH200_RH_HALTED:
+ return "halted";
+ case FUSBH200_RH_SUSPENDED:
+ return "suspended";
+ case FUSBH200_RH_RUNNING:
+ return "running";
+ case FUSBH200_RH_STOPPING:
+ return "stopping";
+ }
+ return "?";
+}
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fusbh200_hcd *fusbh200;
+ unsigned long flags;
+ unsigned temp, size, i;
+ char *next, scratch [80];
+ static char fmt [] = "%*s\n";
+ static char label [] = "";
+
+ hcd = bus_to_hcd(buf->bus);
+ fusbh200 = hcd_to_fusbh200 (hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ size = scnprintf (next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "SUSPENDED (no register access)\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc);
+ goto done;
+ }
+
+ /* Capability Registers */
+ i = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
+ temp = scnprintf (next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "EHCI %x.%02x, rh state %s\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc,
+ i >> 8, i & 0x0ff, rh_state_string(fusbh200));
+ size -= temp;
+ next += temp;
+
+ // FIXME interpret both types of params
+ i = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
+ temp = scnprintf (next, size, "structural params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ i = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+ temp = scnprintf (next, size, "capability params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ /* Operational Registers */
+ temp = dbg_status_buf (scratch, sizeof scratch, label,
+ fusbh200_readl(fusbh200, &fusbh200->regs->status));
+ temp = scnprintf (next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_command_buf (scratch, sizeof scratch, label,
+ fusbh200_readl(fusbh200, &fusbh200->regs->command));
+ temp = scnprintf (next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_intr_buf (scratch, sizeof scratch, label,
+ fusbh200_readl(fusbh200, &fusbh200->regs->intr_enable));
+ temp = scnprintf (next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf (next, size, "uframe %04x\n",
+ fusbh200_read_frame_index(fusbh200));
+ size -= temp;
+ next += temp;
+
+ if (fusbh200->async_unlink) {
+ temp = scnprintf(next, size, "async unlink qh %p\n",
+ fusbh200->async_unlink);
+ size -= temp;
+ next += temp;
+ }
+
+ temp = scnprintf (next, size,
+ "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
+ fusbh200->stats.lost_iaa);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf (next, size, "complete %ld unlink %ld\n",
+ fusbh200->stats.complete, fusbh200->stats.unlink);
+ size -= temp;
+ next += temp;
+
+done:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+
+ return buf->alloc_size - size;
+}
+
+static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
+ ssize_t (*fill_func)(struct debug_buffer *))
+{
+ struct debug_buffer *buf;
+
+ buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+
+ if (buf) {
+ buf->bus = bus;
+ buf->fill_func = fill_func;
+ mutex_init(&buf->mutex);
+ buf->alloc_size = PAGE_SIZE;
+ }
+
+ return buf;
+}
+
+static int fill_buffer(struct debug_buffer *buf)
+{
+ int ret = 0;
+
+ if (!buf->output_buf)
+ buf->output_buf = vmalloc(buf->alloc_size);
+
+ if (!buf->output_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = buf->fill_func(buf);
+
+ if (ret >= 0) {
+ buf->count = ret;
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+ size_t len, loff_t *offset)
+{
+ struct debug_buffer *buf = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&buf->mutex);
+ if (buf->count == 0) {
+ ret = fill_buffer(buf);
+ if (ret != 0) {
+ mutex_unlock(&buf->mutex);
+ goto out;
+ }
+ }
+ mutex_unlock(&buf->mutex);
+
+ ret = simple_read_from_buffer(user_buf, len, offset,
+ buf->output_buf, buf->count);
+
+out:
+ return ret;
+
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf = file->private_data;
+
+ if (buf) {
+ vfree(buf->output_buf);
+ kfree(buf);
+ }
+
+ return 0;
+}
+
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf;
+ buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
+ file->private_data = buf;
+ return 0;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_registers_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files (struct fusbh200_hcd *fusbh200)
+{
+ struct usb_bus *bus = &fusbh200_to_hcd(fusbh200)->self;
+
+ fusbh200->debug_dir = debugfs_create_dir(bus->bus_name, fusbh200_debug_root);
+ if (!fusbh200->debug_dir)
+ return;
+
+ if (!debugfs_create_file("async", S_IRUGO, fusbh200->debug_dir, bus,
+ &debug_async_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("periodic", S_IRUGO, fusbh200->debug_dir, bus,
+ &debug_periodic_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("registers", S_IRUGO, fusbh200->debug_dir, bus,
+ &debug_registers_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(fusbh200->debug_dir);
+}
+
+static inline void remove_debug_files (struct fusbh200_hcd *fusbh200)
+{
+ debugfs_remove_recursive(fusbh200->debug_dir);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: "usec" have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown: shutting down the bridge before the devices using it.
+ */
+static int handshake (struct fusbh200_hcd *fusbh200, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = fusbh200_readl(fusbh200, ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay (1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
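+
+/*
+ * For example (mirroring fusbh200_halt() below), waiting up to 2 ms
+ * (16 * 125 us) for the halt bit to assert looks like:
+ *
+ *	handshake(fusbh200, &fusbh200->regs->status, STS_HALT, STS_HALT, 16 * 125);
+ *
+ * A -ENODEV return means the register read back as all-ones (hardware
+ * gone); -ETIMEDOUT means the expected bit value never showed up.
+ */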
+
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fusbh200_halt (struct fusbh200_hcd *fusbh200)
+{
+ u32 temp;
+
+ spin_lock_irq(&fusbh200->lock);
+
+ /* disable any irqs left enabled by previous code */
+ fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
+
+ /*
+ * This routine gets called during probe before fusbh200->command
+ * has been initialized, so we can't rely on its value.
+ */
+ fusbh200->command &= ~CMD_RUN;
+ temp = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+ temp &= ~(CMD_RUN | CMD_IAAD);
+ fusbh200_writel(fusbh200, temp, &fusbh200->regs->command);
+
+ spin_unlock_irq(&fusbh200->lock);
+ synchronize_irq(fusbh200_to_hcd(fusbh200)->irq);
+
+ return handshake(fusbh200, &fusbh200->regs->status,
+ STS_HALT, STS_HALT, 16 * 125);
+}
+
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fusbh200_reset (struct fusbh200_hcd *fusbh200)
+{
+ int retval;
+ u32 command = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+
+ /* If the EHCI debug controller is active, special care must be
+ * taken before and after a host controller reset */
+ if (fusbh200->debug && !dbgp_reset_prep(fusbh200_to_hcd(fusbh200)))
+ fusbh200->debug = NULL;
+
+ command |= CMD_RESET;
+ dbg_cmd (fusbh200, "reset", command);
+ fusbh200_writel(fusbh200, command, &fusbh200->regs->command);
+ fusbh200->rh_state = FUSBH200_RH_HALTED;
+ fusbh200->next_statechange = jiffies;
+ retval = handshake (fusbh200, &fusbh200->regs->command,
+ CMD_RESET, 0, 250 * 1000);
+
+ if (retval)
+ return retval;
+
+ if (fusbh200->debug)
+ dbgp_external_startup(fusbh200_to_hcd(fusbh200));
+
+ fusbh200->port_c_suspend = fusbh200->suspended_ports =
+ fusbh200->resuming_ports = 0;
+ return retval;
+}
+
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fusbh200_quiesce (struct fusbh200_hcd *fusbh200)
+{
+ u32 temp;
+
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+ return;
+
+ /* wait for any schedule enables/disables to take effect */
+ temp = (fusbh200->command << 10) & (STS_ASS | STS_PSS);
+ handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, temp, 16 * 125);
+
+ /* then disable anything that's still active */
+ spin_lock_irq(&fusbh200->lock);
+ fusbh200->command &= ~(CMD_ASE | CMD_PSE);
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+ spin_unlock_irq(&fusbh200->lock);
+
+ /* hardware can take 16 microframes to turn off ... */
+ handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, 0, 16 * 125);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void end_unlink_async(struct fusbh200_hcd *fusbh200);
+static void unlink_empty_async(struct fusbh200_hcd *fusbh200);
+static void fusbh200_work(struct fusbh200_hcd *fusbh200);
+static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void fusbh200_set_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
+{
+ fusbh200->command |= bit;
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+
+ /* unblock posted write */
+ fusbh200_readl(fusbh200, &fusbh200->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void fusbh200_clear_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
+{
+ fusbh200->command &= ~bit;
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+
+ /* unblock posted write */
+ fusbh200_readl(fusbh200, &fusbh200->regs->command);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support... Now using hrtimers.
+ *
+ * Lots of different events are triggered from fusbh200->hrtimer. Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * fusbh200->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * fusbh200->next_hrtimer_event. Whenever fusbh200->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay. This doesn't
+ * matter, because none of the events are especially time-critical. The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay. In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
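+
+/*
+ * For instance (matching the handlers below), the STS_ASS poll re-arms
+ * itself while it keeps waiting:
+ *
+ *	fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_ASS, true);
+ *
+ * which records a roughly 1 ms timeout for that event and restarts
+ * fusbh200->hrtimer if it is now the lowest-numbered pending event.
+ */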
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum fusbh200_hrtimer_event in fusbh200.h.
+ */
+static unsigned event_delays_ns[] = {
+ 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_ASS */
+ 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_PSS */
+ 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_DEAD */
+ 1125 * NSEC_PER_USEC, /* FUSBH200_HRTIMER_UNLINK_INTR */
+ 2 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_FREE_ITDS */
+ 6 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
+ 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IAA_WATCHDOG */
+ 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
+ 15 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_ASYNC */
+ 100 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void fusbh200_enable_event(struct fusbh200_hcd *fusbh200, unsigned event,
+ bool resched)
+{
+ ktime_t *timeout = &fusbh200->hr_timeouts[event];
+
+ if (resched)
+ *timeout = ktime_add(ktime_get(),
+ ktime_set(0, event_delays_ns[event]));
+ fusbh200->enabled_hrtimer_events |= (1 << event);
+
+ /* Track only the lowest-numbered pending event */
+ if (event < fusbh200->next_hrtimer_event) {
+ fusbh200->next_hrtimer_event = event;
+ hrtimer_start_range_ns(&fusbh200->hrtimer, *timeout,
+ NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+ }
+}
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void fusbh200_poll_ASS(struct fusbh200_hcd *fusbh200)
+{
+ unsigned actual, want;
+
+ /* Don't enable anything if the controller isn't running (e.g., died) */
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+ return;
+
+ want = (fusbh200->command & CMD_ASE) ? STS_ASS : 0;
+ actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_ASS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fusbh200->ASS_poll_count++ < 20) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_ASS, true);
+ return;
+ }
+ fusbh200_dbg(fusbh200, "Waited too long for the async schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fusbh200->ASS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fusbh200->async_count > 0)
+ fusbh200_set_command_bit(fusbh200, CMD_ASE);
+
+ } else { /* Running */
+ if (fusbh200->async_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_ASYNC,
+ true);
+ }
+ }
+}
+
+/* Turn off the async schedule after a brief delay */
+static void fusbh200_disable_ASE(struct fusbh200_hcd *fusbh200)
+{
+ fusbh200_clear_command_bit(fusbh200, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void fusbh200_poll_PSS(struct fusbh200_hcd *fusbh200)
+{
+ unsigned actual, want;
+
+ /* Don't do anything if the controller isn't running (e.g., died) */
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+ return;
+
+ want = (fusbh200->command & CMD_PSE) ? STS_PSS : 0;
+ actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_PSS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fusbh200->PSS_poll_count++ < 20) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_PSS, true);
+ return;
+ }
+ fusbh200_dbg(fusbh200, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fusbh200->PSS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fusbh200->periodic_count > 0)
+ fusbh200_set_command_bit(fusbh200, CMD_PSE);
+
+ } else { /* Running */
+ if (fusbh200->periodic_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_PERIODIC,
+ true);
+ }
+ }
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void fusbh200_disable_PSE(struct fusbh200_hcd *fusbh200)
+{
+ fusbh200_clear_command_bit(fusbh200, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void fusbh200_handle_controller_death(struct fusbh200_hcd *fusbh200)
+{
+ if (!(fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_HALT)) {
+
+ /* Give up after a few milliseconds */
+ if (fusbh200->died_poll_count++ < 5) {
+ /* Try again later */
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_DEAD, true);
+ return;
+ }
+ fusbh200_warn(fusbh200, "Waited too long for the controller to stop, giving up\n");
+ }
+
+ /* Clean up the mess */
+ fusbh200->rh_state = FUSBH200_RH_HALTED;
+ fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
+ fusbh200_work(fusbh200);
+ end_unlink_async(fusbh200);
+
+ /* Not in process context, so don't try to reset the controller */
+}
+
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void fusbh200_handle_intr_unlinks(struct fusbh200_hcd *fusbh200)
+{
+ bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ fusbh200->intr_unlinking = true;
+ while (fusbh200->intr_unlink) {
+ struct fusbh200_qh *qh = fusbh200->intr_unlink;
+
+ if (!stopped && qh->unlink_cycle == fusbh200->intr_unlink_cycle)
+ break;
+ fusbh200->intr_unlink = qh->unlink_next;
+ qh->unlink_next = NULL;
+ end_unlink_intr(fusbh200, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (fusbh200->intr_unlink) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
+ ++fusbh200->intr_unlink_cycle;
+ }
+ fusbh200->intr_unlinking = false;
+}
+
+
+/* Start another free-iTDs cycle */
+static void start_free_itds(struct fusbh200_hcd *fusbh200)
+{
+ if (!(fusbh200->enabled_hrtimer_events & BIT(FUSBH200_HRTIMER_FREE_ITDS))) {
+ fusbh200->last_itd_to_free = list_entry(
+ fusbh200->cached_itd_list.prev,
+ struct fusbh200_itd, itd_list);
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_FREE_ITDS, true);
+ }
+}
+
+/* Wait for controller to stop using old iTDs */
+static void end_free_itds(struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_itd *itd, *n;
+
+ if (fusbh200->rh_state < FUSBH200_RH_RUNNING) {
+ fusbh200->last_itd_to_free = NULL;
+ }
+
+ list_for_each_entry_safe(itd, n, &fusbh200->cached_itd_list, itd_list) {
+ list_del(&itd->itd_list);
+ dma_pool_free(fusbh200->itd_pool, itd, itd->itd_dma);
+ if (itd == fusbh200->last_itd_to_free)
+ break;
+ }
+
+ if (!list_empty(&fusbh200->cached_itd_list))
+ start_free_itds(fusbh200);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void fusbh200_iaa_watchdog(struct fusbh200_hcd *fusbh200)
+{
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+ return;
+
+ /*
+ * Lost IAA irqs wedge things badly; seen first with a vt8235.
+ * So we need this watchdog, but must protect it against both
+ * (a) SMP races against real IAA firing and retriggering, and
+ * (b) clean HC shutdown, when IAA watchdog was pending.
+ */
+ if (fusbh200->async_iaa) {
+ u32 cmd, status;
+
+ /* If we get here, IAA is *REALLY* late. It's barely
+ * conceivable that the system is so busy that CMD_IAAD
+ * is still legitimately set, so let's be sure it's
+ * clear before we read STS_IAA. (The HC should clear
+ * CMD_IAAD when it sets STS_IAA.)
+ */
+ cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+
+ /*
+ * If IAA is set here it either legitimately triggered
+ * after the watchdog timer expired (_way_ late, so we'll
+ * still count it as lost) ... or a silicon erratum:
+ * - VIA seems to set IAA without triggering the IRQ;
+ * - IAAD potentially cleared without setting IAA.
+ */
+ status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
+ if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+ COUNT(fusbh200->stats.lost_iaa);
+ fusbh200_writel(fusbh200, STS_IAA, &fusbh200->regs->status);
+ }
+
+ fusbh200_dbg(fusbh200, "IAA watchdog: status %x cmd %x\n",
+ status, cmd);
+ end_unlink_async(fusbh200);
+ }
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct fusbh200_hcd *fusbh200)
+{
+ /* Not needed if the controller isn't running or it's already enabled */
+ if (fusbh200->rh_state != FUSBH200_RH_RUNNING ||
+ (fusbh200->enabled_hrtimer_events &
+ BIT(FUSBH200_HRTIMER_IO_WATCHDOG)))
+ return;
+
+ /*
+ * Isochronous transfers always need the watchdog.
+ * For other sorts we use it only if the flag is set.
+ */
+ if (fusbh200->isoc_count > 0 || (fusbh200->need_io_watchdog &&
+ fusbh200->async_count + fusbh200->intr_count > 0))
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IO_WATCHDOG, true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum fusbh200_hrtimer_event in fusbh200.h.
+ */
+static void (*event_handlers[])(struct fusbh200_hcd *) = {
+ fusbh200_poll_ASS, /* FUSBH200_HRTIMER_POLL_ASS */
+ fusbh200_poll_PSS, /* FUSBH200_HRTIMER_POLL_PSS */
+ fusbh200_handle_controller_death, /* FUSBH200_HRTIMER_POLL_DEAD */
+ fusbh200_handle_intr_unlinks, /* FUSBH200_HRTIMER_UNLINK_INTR */
+ end_free_itds, /* FUSBH200_HRTIMER_FREE_ITDS */
+ unlink_empty_async, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
+ fusbh200_iaa_watchdog, /* FUSBH200_HRTIMER_IAA_WATCHDOG */
+ fusbh200_disable_PSE, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
+ fusbh200_disable_ASE, /* FUSBH200_HRTIMER_DISABLE_ASYNC */
+ fusbh200_work, /* FUSBH200_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart fusbh200_hrtimer_func(struct hrtimer *t)
+{
+ struct fusbh200_hcd *fusbh200 = container_of(t, struct fusbh200_hcd, hrtimer);
+ ktime_t now;
+ unsigned long events;
+ unsigned long flags;
+ unsigned e;
+
+ spin_lock_irqsave(&fusbh200->lock, flags);
+
+ events = fusbh200->enabled_hrtimer_events;
+ fusbh200->enabled_hrtimer_events = 0;
+ fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
+
+ /*
+ * Check each pending event. If its time has expired, handle
+ * the event; otherwise re-enable it.
+ */
+ now = ktime_get();
+ for_each_set_bit(e, &events, FUSBH200_HRTIMER_NUM_EVENTS) {
+ if (now.tv64 >= fusbh200->hr_timeouts[e].tv64)
+ event_handlers[e](fusbh200);
+ else
+ fusbh200_enable_event(fusbh200, e, false);
+ }
+
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+ return HRTIMER_NORESTART;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define fusbh200_bus_suspend NULL
+#define fusbh200_bus_resume NULL
+
+/*-------------------------------------------------------------------------*/
+
+static int check_reset_complete (
+ struct fusbh200_hcd *fusbh200,
+ int index,
+ u32 __iomem *status_reg,
+ int port_status
+) {
+ if (!(port_status & PORT_CONNECT))
+ return port_status;
+
+ /* if reset finished and it's still not enabled -- handoff */
+ if (!(port_status & PORT_PE)) {
+ /* with integrated TT, there's nobody to hand it to! */
+ fusbh200_dbg (fusbh200,
+ "Failed to enable port %d on root hub TT\n",
+ index+1);
+ return port_status;
+ } else {
+ fusbh200_dbg(fusbh200, "port %d reset complete, port enabled\n",
+ index + 1);
+ }
+
+ return port_status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+static int
+fusbh200_hub_status_data (struct usb_hcd *hcd, char *buf)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ u32 temp, status;
+ u32 mask;
+ int retval = 1;
+ unsigned long flags;
+
+ /* init status to no-changes */
+ buf [0] = 0;
+
+ /* Inform the core about resumes-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
+ status = fusbh200->resuming_ports;
+
+ mask = PORT_CSC | PORT_PEC;
+ // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
+
+ /* no hub change reports (bit 0) for now (power, ...) */
+
+ /* port N changes (bit N)? */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+
+ temp = fusbh200_readl(fusbh200, &fusbh200->regs->port_status);
+
+ /*
+ * Return status information even for ports with OWNER set.
+ * Otherwise khubd wouldn't see the disconnect event when a
+ * high-speed device is switched over to the companion
+ * controller by the user.
+ */
+
+ if ((temp & mask) != 0 || test_bit(0, &fusbh200->port_c_suspend)
+ || (fusbh200->reset_done[0] && time_after_eq(
+ jiffies, fusbh200->reset_done[0]))) {
+ buf [0] |= 1 << 1;
+ status = STS_PCD;
+ }
+ /* FIXME autosuspend idle root hubs */
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return status ? retval : 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+fusbh200_hub_descriptor (
+ struct fusbh200_hcd *fusbh200,
+ struct usb_hub_descriptor *desc
+) {
+ int ports = HCS_N_PORTS (fusbh200->hcs_params);
+ u16 temp;
+
+ desc->bDescriptorType = 0x29;
+ desc->bPwrOn2PwrGood = 10; /* fusbh200 1.0, 2.3.9 says 20ms max */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = ports;
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
+
+ /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+ temp = 0x0008; /* per-port overcurrent reporting */
+ temp |= 0x0002; /* no power switching */
+ desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int fusbh200_hub_control (
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+) {
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ int ports = HCS_N_PORTS (fusbh200->hcs_params);
+ u32 __iomem *status_reg = &fusbh200->regs->port_status;
+ u32 temp, temp1, status;
+ unsigned long flags;
+ int retval = 0;
+ unsigned selector;
+
+ /*
+ * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+ * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+ * (track current state ourselves) ... blink for diagnostics,
+ * power, "this is the one", etc. EHCI spec supports this.
+ */
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fusbh200_readl(fusbh200, status_reg);
+ temp &= ~PORT_RWC_BITS;
+
+ /*
+ * Even if OWNER is set, so the port is owned by the
+ * companion controller, khubd needs to be able to clear
+ * the port-change status bits (especially
+ * USB_PORT_STAT_C_CONNECTION).
+ */
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ fusbh200_writel(fusbh200, temp & ~PORT_PE, status_reg);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ fusbh200_writel(fusbh200, temp | PORT_PEC, status_reg);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ if (temp & PORT_RESET)
+ goto error;
+ if (!(temp & PORT_SUSPEND))
+ break;
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+ /* resume signaling for 20 msec */
+ fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
+ fusbh200->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fusbh200->port_c_suspend);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ fusbh200_writel(fusbh200, temp | PORT_CSC, status_reg);
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ fusbh200_writel(fusbh200, temp | BMISR_OVC, &fusbh200->regs->bmisr);
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* GetPortStatus clears reset */
+ break;
+ default:
+ goto error;
+ }
+ fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted write */
+ break;
+ case GetHubDescriptor:
+ fusbh200_hub_descriptor (fusbh200, (struct usb_hub_descriptor *)
+ buf);
+ break;
+ case GetHubStatus:
+ /* no hub-wide feature/status flags */
+ memset (buf, 0, 4);
+ //cpu_to_le32s ((u32 *) buf);
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ status = 0;
+ temp = fusbh200_readl(fusbh200, status_reg);
+
+ // wPortChange bits
+ if (temp & PORT_CSC)
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+ temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
+ if (temp1 & BMISR_OVC)
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /* whoever resumes must GetPortStatus to complete it!! */
+ if (temp & PORT_RESUME) {
+
+ /* Remote Wakeup received? */
+ if (!fusbh200->reset_done[wIndex]) {
+ /* resume signaling for 20 msec */
+ fusbh200->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ /* check the port again */
+ mod_timer(&fusbh200_to_hcd(fusbh200)->rh_timer,
+ fusbh200->reset_done[wIndex]);
+ }
+
+ /* resume completed? */
+ else if (time_after_eq(jiffies,
+ fusbh200->reset_done[wIndex])) {
+ clear_bit(wIndex, &fusbh200->suspended_ports);
+ set_bit(wIndex, &fusbh200->port_c_suspend);
+ fusbh200->reset_done[wIndex] = 0;
+
+ /* stop resume signaling */
+ temp = fusbh200_readl(fusbh200, status_reg);
+ fusbh200_writel(fusbh200,
+ temp & ~(PORT_RWC_BITS | PORT_RESUME),
+ status_reg);
+ clear_bit(wIndex, &fusbh200->resuming_ports);
+ retval = handshake(fusbh200, status_reg,
+ PORT_RESUME, 0, 2000 /* 2msec */);
+ if (retval != 0) {
+ fusbh200_err(fusbh200,
+ "port %d resume error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+ temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+ }
+ }
+
+ /* whoever resets must GetPortStatus to complete it!! */
+ if ((temp & PORT_RESET)
+ && time_after_eq(jiffies,
+ fusbh200->reset_done[wIndex])) {
+ status |= USB_PORT_STAT_C_RESET << 16;
+ fusbh200->reset_done [wIndex] = 0;
+ clear_bit(wIndex, &fusbh200->resuming_ports);
+
+ /* force reset to complete */
+ fusbh200_writel(fusbh200, temp & ~(PORT_RWC_BITS | PORT_RESET),
+ status_reg);
+ /* REVISIT: some hardware needs 550+ usec to clear
+ * this bit; seems too long to spin routinely...
+ */
+ retval = handshake(fusbh200, status_reg,
+ PORT_RESET, 0, 1000);
+ if (retval != 0) {
+ fusbh200_err (fusbh200, "port %d reset error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+
+ /* see what we found out */
+ temp = check_reset_complete (fusbh200, wIndex, status_reg,
+ fusbh200_readl(fusbh200, status_reg));
+ }
+
+ if (!(temp & (PORT_RESUME|PORT_RESET))) {
+ fusbh200->reset_done[wIndex] = 0;
+ clear_bit(wIndex, &fusbh200->resuming_ports);
+ }
+
+ /* transfer dedicated ports to the companion hc */
+ if ((temp & PORT_CONNECT) &&
+ test_bit(wIndex, &fusbh200->companion_ports)) {
+ temp &= ~PORT_RWC_BITS;
+ fusbh200_writel(fusbh200, temp, status_reg);
+ fusbh200_dbg(fusbh200, "port %d --> companion\n", wIndex + 1);
+ temp = fusbh200_readl(fusbh200, status_reg);
+ }
+
+ /*
+ * Even if OWNER is set, there's no harm letting khubd
+ * see the wPortStatus values (they should all be 0 except
+ * for PORT_POWER anyway).
+ */
+
+ if (temp & PORT_CONNECT) {
+ status |= USB_PORT_STAT_CONNECTION;
+ status |= fusbh200_port_speed(fusbh200, temp);
+ }
+ if (temp & PORT_PE)
+ status |= USB_PORT_STAT_ENABLE;
+
+ /* maybe the port was unsuspended without our knowledge */
+ if (temp & (PORT_SUSPEND|PORT_RESUME)) {
+ status |= USB_PORT_STAT_SUSPEND;
+ } else if (test_bit(wIndex, &fusbh200->suspended_ports)) {
+ clear_bit(wIndex, &fusbh200->suspended_ports);
+ clear_bit(wIndex, &fusbh200->resuming_ports);
+ fusbh200->reset_done[wIndex] = 0;
+ if (temp & PORT_PE)
+ set_bit(wIndex, &fusbh200->port_c_suspend);
+ }
+
+ temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
+ if (temp1 & BMISR_OVC)
+ status |= USB_PORT_STAT_OVERCURRENT;
+ if (temp & PORT_RESET)
+ status |= USB_PORT_STAT_RESET;
+ if (test_bit(wIndex, &fusbh200->port_c_suspend))
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+ dbg_port(fusbh200, "GetStatus", wIndex + 1, temp);
+ put_unaligned_le32(status, buf);
+ break;
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case SetPortFeature:
+ selector = wIndex >> 8;
+ wIndex &= 0xff;
+
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fusbh200_readl(fusbh200, status_reg);
+ temp &= ~PORT_RWC_BITS;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ if ((temp & PORT_PE) == 0
+ || (temp & PORT_RESET) != 0)
+ goto error;
+
+			/* After the above check the port must be connected.
+			 * Setting the suspend bit could also put the PHY into
+			 * low-power mode if the hostpc feature is available.
+			 */
+ fusbh200_writel(fusbh200, temp | PORT_SUSPEND, status_reg);
+ set_bit(wIndex, &fusbh200->suspended_ports);
+ break;
+ case USB_PORT_FEAT_RESET:
+ if (temp & PORT_RESUME)
+ goto error;
+ /* line status bits may report this as low speed,
+ * which can be fine if this root hub has a
+ * transaction translator built in.
+ */
+ fusbh200_dbg(fusbh200, "port %d reset\n", wIndex + 1);
+ temp |= PORT_RESET;
+ temp &= ~PORT_PE;
+
+ /*
+ * caller must wait, then call GetPortStatus
+ * usb 2.0 spec says 50 ms resets on root
+ */
+ fusbh200->reset_done [wIndex] = jiffies
+ + msecs_to_jiffies (50);
+ fusbh200_writel(fusbh200, temp, status_reg);
+ break;
+
+ /* For downstream facing ports (these): one hub port is put
+ * into test mode according to USB2 11.24.2.13, then the hub
+ * must be reset (which for root hub now means rmmod+modprobe,
+ * or else system reboot). See EHCI 2.3.9 and 4.14 for info
+ * about the EHCI-specific stuff.
+ */
+ case USB_PORT_FEAT_TEST:
+ if (!selector || selector > 5)
+ goto error;
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+ fusbh200_quiesce(fusbh200);
+ spin_lock_irqsave(&fusbh200->lock, flags);
+
+ /* Put all enabled ports into suspend */
+ temp = fusbh200_readl(fusbh200, status_reg) & ~PORT_RWC_BITS;
+ if (temp & PORT_PE)
+ fusbh200_writel(fusbh200, temp | PORT_SUSPEND,
+ status_reg);
+
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+ fusbh200_halt(fusbh200);
+ spin_lock_irqsave(&fusbh200->lock, flags);
+
+ temp = fusbh200_readl(fusbh200, status_reg);
+ temp |= selector << 16;
+ fusbh200_writel(fusbh200, temp, status_reg);
+ break;
+
+ default:
+ goto error;
+ }
+ fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */
+ break;
+
+ default:
+error:
+ /* "stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return retval;
+}
+
+static void __maybe_unused fusbh200_relinquish_port(struct usb_hcd *hcd,
+ int portnum)
+{
+ return;
+}
+
+static int __maybe_unused fusbh200_port_handed_over(struct usb_hcd *hcd,
+ int portnum)
+{
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+/*
+ * There are basically three types of memory:
+ * - data used only by the HCD ... kmalloc is fine
+ * - async and periodic schedules, shared by HC and HCD ... these
+ * need to use dma_pool or dma_alloc_coherent
+ * - driver buffers, read/written by HC ... single shot DMA mapped
+ *
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
+ * No memory seen by this driver is pageable.
+ */
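+
+/*
+ * In this driver that maps onto: kzalloc for the struct fusbh200_qh
+ * bookkeeping, dma_pool allocations for the qtd/qh/itd hardware parts,
+ * and dma_alloc_coherent for the periodic frame list (all set up in
+ * fusbh200_mem_init() below).
+ */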
+
+/*-------------------------------------------------------------------------*/
+
+/* Allocate the key transfer structures from the previously allocated pool */
+
+static inline void fusbh200_qtd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd,
+ dma_addr_t dma)
+{
+ memset (qtd, 0, sizeof *qtd);
+ qtd->qtd_dma = dma;
+ qtd->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
+ qtd->hw_next = FUSBH200_LIST_END(fusbh200);
+ qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
+ INIT_LIST_HEAD (&qtd->qtd_list);
+}
+
+static struct fusbh200_qtd *fusbh200_qtd_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
+{
+ struct fusbh200_qtd *qtd;
+ dma_addr_t dma;
+
+ qtd = dma_pool_alloc (fusbh200->qtd_pool, flags, &dma);
+ if (qtd != NULL) {
+ fusbh200_qtd_init(fusbh200, qtd, dma);
+ }
+ return qtd;
+}
+
+static inline void fusbh200_qtd_free (struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
+{
+ dma_pool_free (fusbh200->qtd_pool, qtd, qtd->qtd_dma);
+}
+
+
+static void qh_destroy(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ /* clean qtds first, and know this is not linked */
+ if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
+ fusbh200_dbg (fusbh200, "unused qh not empty!\n");
+ BUG ();
+ }
+ if (qh->dummy)
+ fusbh200_qtd_free (fusbh200, qh->dummy);
+ dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
+ kfree(qh);
+}
+
+static struct fusbh200_qh *fusbh200_qh_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
+{
+ struct fusbh200_qh *qh;
+ dma_addr_t dma;
+
+ qh = kzalloc(sizeof *qh, GFP_ATOMIC);
+ if (!qh)
+ goto done;
+ qh->hw = (struct fusbh200_qh_hw *)
+ dma_pool_alloc(fusbh200->qh_pool, flags, &dma);
+ if (!qh->hw)
+ goto fail;
+ memset(qh->hw, 0, sizeof *qh->hw);
+ qh->qh_dma = dma;
+ // INIT_LIST_HEAD (&qh->qh_list);
+ INIT_LIST_HEAD (&qh->qtd_list);
+
+ /* dummy td enables safe urb queuing */
+ qh->dummy = fusbh200_qtd_alloc (fusbh200, flags);
+ if (qh->dummy == NULL) {
+ fusbh200_dbg (fusbh200, "no dummy td\n");
+ goto fail1;
+ }
+done:
+ return qh;
+fail1:
+ dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
+fail:
+ kfree(qh);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+
+static void fusbh200_mem_cleanup (struct fusbh200_hcd *fusbh200)
+{
+ if (fusbh200->async)
+ qh_destroy(fusbh200, fusbh200->async);
+ fusbh200->async = NULL;
+
+ if (fusbh200->dummy)
+ qh_destroy(fusbh200, fusbh200->dummy);
+ fusbh200->dummy = NULL;
+
+ /* DMA consistent memory and pools */
+ if (fusbh200->qtd_pool)
+ dma_pool_destroy (fusbh200->qtd_pool);
+ fusbh200->qtd_pool = NULL;
+
+ if (fusbh200->qh_pool) {
+ dma_pool_destroy (fusbh200->qh_pool);
+ fusbh200->qh_pool = NULL;
+ }
+
+ if (fusbh200->itd_pool)
+ dma_pool_destroy (fusbh200->itd_pool);
+ fusbh200->itd_pool = NULL;
+
+ if (fusbh200->periodic)
+ dma_free_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
+ fusbh200->periodic_size * sizeof (u32),
+ fusbh200->periodic, fusbh200->periodic_dma);
+ fusbh200->periodic = NULL;
+
+ /* shadow periodic table */
+ kfree(fusbh200->pshadow);
+ fusbh200->pshadow = NULL;
+}
+
+/* remember to add cleanup code (above) if you add anything here */
+static int fusbh200_mem_init (struct fusbh200_hcd *fusbh200, gfp_t flags)
+{
+ int i;
+
+ /* QTDs for control/bulk/intr transfers */
+ fusbh200->qtd_pool = dma_pool_create ("fusbh200_qtd",
+ fusbh200_to_hcd(fusbh200)->self.controller,
+ sizeof (struct fusbh200_qtd),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fusbh200->qtd_pool) {
+ goto fail;
+ }
+
+ /* QHs for control/bulk/intr transfers */
+ fusbh200->qh_pool = dma_pool_create ("fusbh200_qh",
+ fusbh200_to_hcd(fusbh200)->self.controller,
+ sizeof(struct fusbh200_qh_hw),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fusbh200->qh_pool) {
+ goto fail;
+ }
+ fusbh200->async = fusbh200_qh_alloc (fusbh200, flags);
+ if (!fusbh200->async) {
+ goto fail;
+ }
+
+ /* ITD for high speed ISO transfers */
+ fusbh200->itd_pool = dma_pool_create ("fusbh200_itd",
+ fusbh200_to_hcd(fusbh200)->self.controller,
+ sizeof (struct fusbh200_itd),
+ 64 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fusbh200->itd_pool) {
+ goto fail;
+ }
+
+ /* Hardware periodic table */
+ fusbh200->periodic = (__le32 *)
+ dma_alloc_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
+ fusbh200->periodic_size * sizeof(__le32),
+ &fusbh200->periodic_dma, 0);
+ if (fusbh200->periodic == NULL) {
+ goto fail;
+ }
+
+ for (i = 0; i < fusbh200->periodic_size; i++)
+ fusbh200->periodic[i] = FUSBH200_LIST_END(fusbh200);
+
+ /* software shadow of hardware table */
+ fusbh200->pshadow = kcalloc(fusbh200->periodic_size, sizeof(void *), flags);
+ if (fusbh200->pshadow != NULL)
+ return 0;
+
+fail:
+ fusbh200_dbg (fusbh200, "couldn't init memory\n");
+ fusbh200_mem_cleanup (fusbh200);
+ return -ENOMEM;
+}
+
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
+ *
+ * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
+ * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
+ * buffers needed for the larger number). We use one QH per endpoint, queue
+ * multiple urbs (all three types) per endpoint. URBs may need several qtds.
+ *
+ * ISO traffic uses "ISO TD" (itd) records, and (along with
+ * interrupts) needs careful scheduling. Performance improvements can be
+ * an ongoing challenge. That's in "ehci-sched.c".
+ *
+ * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
+ * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
+ * (b) special fields in qh entries or (c) split iso entries. TTs will
+ * buffer low/full speed data so the host collects it at high speed.
+ */
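+
+/*
+ * As a concrete example, a control URB built by qh_urb_transaction()
+ * below maps to a SETUP qtd plus at least one more qtd for the data
+ * and/or status stage, all queued on that endpoint's qh.
+ */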
+
+/*-------------------------------------------------------------------------*/
+
+/* fill a qtd, returning how much of the buffer we were able to queue up */
+
+static int
+qtd_fill(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd, dma_addr_t buf,
+ size_t len, int token, int maxpacket)
+{
+ int i, count;
+ u64 addr = buf;
+
+ /* one buffer entry per 4K ... first might be short or unaligned */
+ qtd->hw_buf[0] = cpu_to_hc32(fusbh200, (u32)addr);
+ qtd->hw_buf_hi[0] = cpu_to_hc32(fusbh200, (u32)(addr >> 32));
+ count = 0x1000 - (buf & 0x0fff); /* rest of that page */
+ if (likely (len < count)) /* ... iff needed */
+ count = len;
+ else {
+ buf += 0x1000;
+ buf &= ~0x0fff;
+
+ /* per-qtd limit: from 16K to 20K (best alignment) */
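+		/* e.g. a page-aligned buf can map 5 x 4 KB = 20 KB in one
+		 * qtd; with the worst alignment the first hw_buf entry holds
+		 * a single byte, so only a little over 16 KB fits.
+		 */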
+ for (i = 1; count < len && i < 5; i++) {
+ addr = buf;
+ qtd->hw_buf[i] = cpu_to_hc32(fusbh200, (u32)addr);
+ qtd->hw_buf_hi[i] = cpu_to_hc32(fusbh200,
+ (u32)(addr >> 32));
+ buf += 0x1000;
+ if ((count + 0x1000) < len)
+ count += 0x1000;
+ else
+ count = len;
+ }
+
+ /* short packets may only terminate transfers */
+ if (count != len)
+ count -= (count % maxpacket);
+ }
+ qtd->hw_token = cpu_to_hc32(fusbh200, (count << 16) | token);
+ qtd->length = count;
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+qh_update (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh, struct fusbh200_qtd *qtd)
+{
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ /* writes to an active overlay are unsafe */
+ BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+ hw->hw_qtd_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+ hw->hw_alt_next = FUSBH200_LIST_END(fusbh200);
+
+ /* Except for control endpoints, we make hardware maintain data
+ * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+ * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+ * ever clear it.
+ */
+ if (!(hw->hw_info1 & cpu_to_hc32(fusbh200, QH_TOGGLE_CTL))) {
+ unsigned is_out, epnum;
+
+ is_out = qh->is_out;
+ epnum = (hc32_to_cpup(fusbh200, &hw->hw_info1) >> 8) & 0x0f;
+ if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
+ hw->hw_token &= ~cpu_to_hc32(fusbh200, QTD_TOGGLE);
+ usb_settoggle (qh->dev, epnum, is_out, 1);
+ }
+ }
+
+ hw->hw_token &= cpu_to_hc32(fusbh200, QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* if it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void
+qh_refresh (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qtd *qtd;
+
+ if (list_empty (&qh->qtd_list))
+ qtd = qh->dummy;
+ else {
+ qtd = list_entry (qh->qtd_list.next,
+ struct fusbh200_qtd, qtd_list);
+ /*
+ * first qtd may already be partially processed.
+ * If we come here during unlink, the QH overlay region
+ * might have reference to the just unlinked qtd. The
+ * qtd is updated in qh_completions(). Update the QH
+ * overlay here.
+ */
+ if (cpu_to_hc32(fusbh200, qtd->qtd_dma) == qh->hw->hw_current) {
+ qh->hw->hw_qtd_next = qtd->hw_next;
+ qtd = NULL;
+ }
+ }
+
+ if (qtd)
+ qh_update (fusbh200, qh, qtd);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void qh_link_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+
+static void fusbh200_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+ struct fusbh200_qh *qh = ep->hcpriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fusbh200->lock, flags);
+ qh->clearing_tt = 0;
+ if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
+ && fusbh200->rh_state == FUSBH200_RH_RUNNING)
+ qh_link_async(fusbh200, qh);
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+}
+
+static void fusbh200_clear_tt_buffer(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh,
+ struct urb *urb, u32 token)
+{
+
+ /* If an async split transaction gets an error or is unlinked,
+ * the TT buffer may be left in an indeterminate state. We
+ * have to clear the TT buffer.
+ *
+ * Note: this routine is never called for Isochronous transfers.
+ */
+ if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
+ struct usb_device *tt = urb->dev->tt->hub;
+
+ dev_dbg(&tt->dev,
+ "clear tt buffer port %d, a%d ep%d t%08x\n",
+ urb->dev->ttport, urb->dev->devnum,
+ usb_pipeendpoint(urb->pipe), token);
+
+ if (urb->dev->tt->hub !=
+ fusbh200_to_hcd(fusbh200)->self.root_hub) {
+ if (usb_hub_clear_tt_buffer(urb) == 0)
+ qh->clearing_tt = 1;
+ }
+ }
+}
+
+static int qtd_copy_status (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ size_t length,
+ u32 token
+)
+{
+ int status = -EINPROGRESS;
+
+ /* count IN/OUT bytes, not SETUP (even short packets) */
+ if (likely (QTD_PID (token) != 2))
+ urb->actual_length += length - QTD_LENGTH (token);
+
+ /* don't modify error codes */
+ if (unlikely(urb->unlinked))
+ return status;
+
+ /* force cleanup after short read; not always an error */
+ if (unlikely (IS_SHORT_READ (token)))
+ status = -EREMOTEIO;
+
+ /* serious "can't proceed" faults reported by the hardware */
+ if (token & QTD_STS_HALT) {
+ if (token & QTD_STS_BABBLE) {
+ /* FIXME "must" disable babbling device's port too */
+ status = -EOVERFLOW;
+ /* CERR nonzero + halt --> stall */
+ } else if (QTD_CERR(token)) {
+ status = -EPIPE;
+
+ /* In theory, more than one of the following bits can be set
+ * since they are sticky and the transaction is retried.
+ * Which to test first is rather arbitrary.
+ */
+ } else if (token & QTD_STS_MMF) {
+ /* fs/ls interrupt xfer missed the complete-split */
+ status = -EPROTO;
+ } else if (token & QTD_STS_DBE) {
+ status = (QTD_PID (token) == 1) /* IN ? */
+ ? -ENOSR /* hc couldn't read data */
+ : -ECOMM; /* hc couldn't write data */
+ } else if (token & QTD_STS_XACT) {
+ /* timeout, bad CRC, wrong PID, etc */
+ fusbh200_dbg(fusbh200, "devpath %s ep%d%s 3strikes\n",
+ urb->dev->devpath,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+ status = -EPROTO;
+ } else { /* unknown */
+ status = -EPROTO;
+ }
+
+ fusbh200_dbg(fusbh200,
+ "dev%d ep%d%s qtd token %08x --> status %d\n",
+ usb_pipedevice (urb->pipe),
+ usb_pipeendpoint (urb->pipe),
+ usb_pipein (urb->pipe) ? "in" : "out",
+ token, status);
+ }
+
+ return status;
+}
+
+static void
+fusbh200_urb_done(struct fusbh200_hcd *fusbh200, struct urb *urb, int status)
+__releases(fusbh200->lock)
+__acquires(fusbh200->lock)
+{
+ if (likely (urb->hcpriv != NULL)) {
+ struct fusbh200_qh *qh = (struct fusbh200_qh *) urb->hcpriv;
+
+ /* S-mask in a QH means it's an interrupt urb */
+ if ((qh->hw->hw_info2 & cpu_to_hc32(fusbh200, QH_SMASK)) != 0) {
+
+ /* ... update hc-wide periodic stats (for usbfs) */
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs--;
+ }
+ }
+
+ if (unlikely(urb->unlinked)) {
+ COUNT(fusbh200->stats.unlink);
+ } else {
+ /* report non-error and short read status as zero */
+ if (status == -EINPROGRESS || status == -EREMOTEIO)
+ status = 0;
+ COUNT(fusbh200->stats.complete);
+ }
+
+#ifdef FUSBH200_URB_TRACE
+ fusbh200_dbg (fusbh200,
+ "%s %s urb %p ep%d%s status %d len %d/%d\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint (urb->pipe),
+ usb_pipein (urb->pipe) ? "in" : "out",
+ status,
+ urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+ /* complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+ spin_unlock (&fusbh200->lock);
+ usb_hcd_giveback_urb(fusbh200_to_hcd(fusbh200), urb, status);
+ spin_lock (&fusbh200->lock);
+}
+
+static int qh_schedule (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+
+/*
+ * Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current. Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned
+qh_completions (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qtd *last, *end = qh->dummy;
+ struct list_head *entry, *tmp;
+ int last_status;
+ int stopped;
+ unsigned count = 0;
+ u8 state;
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ if (unlikely (list_empty (&qh->qtd_list)))
+ return count;
+
+ /* completions (or tasks on other cpus) must never clobber HALT
+ * till we've gone through and cleaned everything up, even when
+ * they add urbs to this qh's queue or mark them for unlinking.
+ *
+ * NOTE: unlinking expects to be done in queue order.
+ *
+ * It's a bug for qh->qh_state to be anything other than
+ * QH_STATE_IDLE, unless our caller is scan_async() or
+ * scan_intr().
+ */
+ state = qh->qh_state;
+ qh->qh_state = QH_STATE_COMPLETING;
+ stopped = (state == QH_STATE_IDLE);
+
+ rescan:
+ last = NULL;
+ last_status = -EINPROGRESS;
+ qh->needs_rescan = 0;
+
+ /* remove de-activated QTDs from front of queue.
+ * after faults (including short reads), cleanup this urb
+ * then let the queue advance.
+ * if queue is stopped, handles unlinks.
+ */
+ list_for_each_safe (entry, tmp, &qh->qtd_list) {
+ struct fusbh200_qtd *qtd;
+ struct urb *urb;
+ u32 token = 0;
+
+ qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
+ urb = qtd->urb;
+
+ /* clean up any state from previous QTD ...*/
+ if (last) {
+ if (likely (last->urb != urb)) {
+ fusbh200_urb_done(fusbh200, last->urb, last_status);
+ count++;
+ last_status = -EINPROGRESS;
+ }
+ fusbh200_qtd_free (fusbh200, last);
+ last = NULL;
+ }
+
+ /* ignore urbs submitted during completions we reported */
+ if (qtd == end)
+ break;
+
+ /* hardware copies qtd out of qh overlay */
+ rmb ();
+ token = hc32_to_cpu(fusbh200, qtd->hw_token);
+
+ /* always clean up qtds the hc de-activated */
+ retry_xacterr:
+ if ((token & QTD_STS_ACTIVE) == 0) {
+
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ fusbh200_dbg(fusbh200,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
+ /* on STALL, error, and short reads this urb must
+ * complete and all its qtds must be recycled.
+ */
+ if ((token & QTD_STS_HALT) != 0) {
+
+ /* retry transaction errors until we
+ * reach the software xacterr limit
+ */
+ if ((token & QTD_STS_XACT) &&
+ QTD_CERR(token) == 0 &&
+ ++qh->xacterrs < QH_XACTERR_MAX &&
+ !urb->unlinked) {
+ fusbh200_dbg(fusbh200,
+ "detected XactErr len %zu/%zu retry %d\n",
+ qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
+
+ /* reset the token in the qtd and the
+ * qh overlay (which still contains
+ * the qtd) so that we pick up from
+ * where we left off
+ */
+ token &= ~QTD_STS_HALT;
+ token |= QTD_STS_ACTIVE |
+ (FUSBH200_TUNE_CERR << 10);
+ qtd->hw_token = cpu_to_hc32(fusbh200,
+ token);
+ wmb();
+ hw->hw_token = cpu_to_hc32(fusbh200,
+ token);
+ goto retry_xacterr;
+ }
+ stopped = 1;
+
+ /* magic dummy for some short reads; qh won't advance.
+ * that silicon quirk can kick in with this dummy too.
+ *
+ * other short reads won't stop the queue, including
+ * control transfers (status stage handles that) or
+ * most other single-qtd reads ... the queue stops if
+ * URB_SHORT_NOT_OK was set so the driver submitting
+ * the urbs could clean it up.
+ */
+ } else if (IS_SHORT_READ (token)
+ && !(qtd->hw_alt_next
+ & FUSBH200_LIST_END(fusbh200))) {
+ stopped = 1;
+ }
+
+ /* stop scanning when we reach qtds the hc is using */
+ } else if (likely (!stopped
+ && fusbh200->rh_state >= FUSBH200_RH_RUNNING)) {
+ break;
+
+ /* scan the whole queue for unlinks whenever it stops */
+ } else {
+ stopped = 1;
+
+ /* cancel everything if we halt, suspend, etc */
+ if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+ last_status = -ESHUTDOWN;
+
+ /* this qtd is active; skip it unless a previous qtd
+ * for its urb faulted, or its urb was canceled.
+ */
+ else if (last_status == -EINPROGRESS && !urb->unlinked)
+ continue;
+
+ /* qh unlinked; token in overlay may be most current */
+ if (state == QH_STATE_IDLE
+ && cpu_to_hc32(fusbh200, qtd->qtd_dma)
+ == hw->hw_current) {
+ token = hc32_to_cpu(fusbh200, hw->hw_token);
+
+ /* An unlink may leave an incomplete
+ * async transaction in the TT buffer.
+ * We have to clear it.
+ */
+ fusbh200_clear_tt_buffer(fusbh200, qh, urb, token);
+ }
+ }
+
+ /* unless we already know the urb's status, collect qtd status
+ * and update count of bytes transferred. in common short read
+ * cases with only one data qtd (including control transfers),
+ * queue processing won't halt. but with two or more qtds (for
+ * example, with a 32 KB transfer), when the first qtd gets a
+ * short read the second must be removed by hand.
+ */
+ if (last_status == -EINPROGRESS) {
+ last_status = qtd_copy_status(fusbh200, urb,
+ qtd->length, token);
+ if (last_status == -EREMOTEIO
+ && (qtd->hw_alt_next
+ & FUSBH200_LIST_END(fusbh200)))
+ last_status = -EINPROGRESS;
+
+ /* As part of low/full-speed endpoint-halt processing
+ * we must clear the TT buffer (11.17.5).
+ */
+ if (unlikely(last_status != -EINPROGRESS &&
+ last_status != -EREMOTEIO)) {
+ /* The TT's in some hubs malfunction when they
+ * receive this request following a STALL (they
+ * stop sending isochronous packets). Since a
+ * STALL can't leave the TT buffer in a busy
+ * state (if you believe Figures 11-48 - 11-51
+ * in the USB 2.0 spec), we won't clear the TT
+ * buffer in this case. Strictly speaking this
+ * is a violation of the spec.
+ */
+ if (last_status != -EPIPE)
+ fusbh200_clear_tt_buffer(fusbh200, qh, urb,
+ token);
+ }
+ }
+
+ /* if we're removing something not at the queue head,
+ * patch the hardware queue pointer.
+ */
+ if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+ last = list_entry (qtd->qtd_list.prev,
+ struct fusbh200_qtd, qtd_list);
+ last->hw_next = qtd->hw_next;
+ }
+
+ /* remove qtd; it's recycled after possible urb completion */
+ list_del (&qtd->qtd_list);
+ last = qtd;
+
+ /* reinit the xacterr counter for the next qtd */
+ qh->xacterrs = 0;
+ }
+
+ /* last urb's completion might still need calling */
+ if (likely (last != NULL)) {
+ fusbh200_urb_done(fusbh200, last->urb, last_status);
+ count++;
+ fusbh200_qtd_free (fusbh200, last);
+ }
+
+ /* Do we need to rescan for URBs dequeued during a giveback? */
+ if (unlikely(qh->needs_rescan)) {
+ /* If the QH is already unlinked, do the rescan now. */
+ if (state == QH_STATE_IDLE)
+ goto rescan;
+
+ /* Otherwise we have to wait until the QH is fully unlinked.
+ * Our caller will start an unlink if qh->needs_rescan is
+ * set. But if an unlink has already started, nothing needs
+ * to be done.
+ */
+ if (state != QH_STATE_LINKED)
+ qh->needs_rescan = 0;
+ }
+
+ /* restore original state; caller must unlink or relink */
+ qh->qh_state = state;
+
+ /* be sure the hardware's done with the qh before refreshing
+ * it after fault cleanup, or recovering from silicon wrongly
+ * overlaying the dummy qtd (which reduces DMA chatter).
+ */
+ if (stopped != 0 || hw->hw_qtd_next == FUSBH200_LIST_END(fusbh200)) {
+ switch (state) {
+ case QH_STATE_IDLE:
+ qh_refresh(fusbh200, qh);
+ break;
+ case QH_STATE_LINKED:
+ /* We won't refresh a QH that's linked (after the HC
+ * stopped the queue). That avoids a race:
+ * - HC reads first part of QH;
+ * - CPU updates that first part and the token;
+ * - HC reads rest of that QH, including token
+ * Result: HC gets an inconsistent image, and then
+ * DMAs to/from the wrong memory (corrupting it).
+ *
+ * That should be rare for interrupt transfers,
+ * except maybe high bandwidth ...
+ */
+
+ /* Tell the caller to start an unlink */
+ qh->needs_rescan = 1;
+ break;
+ /* otherwise, unlink already started */
+ }
+ }
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+// ... and packet size, for any kind of endpoint descriptor
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
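+
+/*
+ * Example: a high-bandwidth high speed endpoint with wMaxPacketSize
+ * 0x1400 gives hb_mult() == 3 transactions per microframe of
+ * max_packet() == 1024 bytes each.
+ */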
+
+/*
+ * reverse of qh_urb_transaction: free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *qtd_list
+) {
+ struct list_head *entry, *temp;
+
+ list_for_each_safe (entry, temp, qtd_list) {
+ struct fusbh200_qtd *qtd;
+
+ qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
+ list_del (&qtd->qtd_list);
+ fusbh200_qtd_free (fusbh200, qtd);
+ }
+}
+
+/*
+ * create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *
+qh_urb_transaction (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *head,
+ gfp_t flags
+) {
+ struct fusbh200_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, this_sg_len, maxpacket;
+ int is_input;
+ u32 token;
+ int i;
+ struct scatterlist *sg;
+
+ /*
+ * URBs map to sequences of QTDs: one logical transaction
+ */
+ qtd = fusbh200_qtd_alloc (fusbh200, flags);
+ if (unlikely (!qtd))
+ return NULL;
+ list_add_tail (&qtd->qtd_list, head);
+ qtd->urb = urb;
+
+ token = QTD_STS_ACTIVE;
+	token |= (FUSBH200_TUNE_CERR << 10);	/* transaction error counter (CERR) */
+ /* for split transactions, SplitXState initialized to zero */
+
+ len = urb->transfer_buffer_length;
+ is_input = usb_pipein (urb->pipe);
+ if (usb_pipecontrol (urb->pipe)) {
+ /* SETUP pid */
+ qtd_fill(fusbh200, qtd, urb->setup_dma,
+ sizeof (struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
+
+ /* ... and always at least one more pid */
+ token ^= QTD_TOGGLE;
+ qtd_prev = qtd;
+ qtd = fusbh200_qtd_alloc (fusbh200, flags);
+ if (unlikely (!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+ list_add_tail (&qtd->qtd_list, head);
+
+ /* for zero length DATA stages, STATUS is always IN */
+ if (len == 0)
+ token |= (1 /* "in" */ << 8);
+ }
+
+ /*
+ * data transfer stage: buffer setup
+ */
+ i = urb->num_mapped_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg;
+ buf = sg_dma_address(sg);
+
+ /* urb->transfer_buffer_length may be smaller than the
+ * size of the scatterlist (or vice versa)
+ */
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ } else {
+ sg = NULL;
+ buf = urb->transfer_dma;
+ this_sg_len = len;
+ }
+
+ if (is_input)
+ token |= (1 /* "in" */ << 8);
+ /* else it's already initted to "out" pid (0 << 8) */
+
+ maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+ /*
+ * buffer gets wrapped in one or more qtds;
+ * last one may be "short" (including zero len)
+ * and may serve as a control status ack
+ */
+ for (;;) {
+ int this_qtd_len;
+
+ this_qtd_len = qtd_fill(fusbh200, qtd, buf, this_sg_len, token,
+ maxpacket);
+ this_sg_len -= this_qtd_len;
+ len -= this_qtd_len;
+ buf += this_qtd_len;
+
+ /*
+ * short reads advance to a "magic" dummy instead of the next
+ * qtd ... that forces the queue to stop, for manual cleanup.
+ * (this will usually be overridden later.)
+ */
+ if (is_input)
+ qtd->hw_alt_next = fusbh200->async->hw->hw_alt_next;
+
+ /* qh makes control packets use qtd toggle; maybe switch it */
+ if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+ token ^= QTD_TOGGLE;
+
+ if (likely(this_sg_len <= 0)) {
+ if (--i <= 0 || len <= 0)
+ break;
+ sg = sg_next(sg);
+ buf = sg_dma_address(sg);
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ }
+
+ qtd_prev = qtd;
+ qtd = fusbh200_qtd_alloc (fusbh200, flags);
+ if (unlikely (!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+ list_add_tail (&qtd->qtd_list, head);
+ }
+
+ /*
+ * unless the caller requires manual cleanup after short reads,
+ * have the alt_next mechanism keep the queue running after the
+ * last data qtd (the only one, for control and most other cases).
+ */
+ if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+ || usb_pipecontrol (urb->pipe)))
+ qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
+
+ /*
+ * control requests may need a terminating data "status" ack;
+ * other OUT ones may need a terminating short packet
+ * (zero length).
+ */
+ if (likely (urb->transfer_buffer_length != 0)) {
+ int one_more = 0;
+
+ if (usb_pipecontrol (urb->pipe)) {
+ one_more = 1;
+ token ^= 0x0100; /* "in" <--> "out" */
+ token |= QTD_TOGGLE; /* force DATA1 */
+ } else if (usb_pipeout(urb->pipe)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && !(urb->transfer_buffer_length % maxpacket)) {
+ one_more = 1;
+ }
+ if (one_more) {
+ qtd_prev = qtd;
+ qtd = fusbh200_qtd_alloc (fusbh200, flags);
+ if (unlikely (!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+ list_add_tail (&qtd->qtd_list, head);
+
+ /* never any data in such packets */
+ qtd_fill(fusbh200, qtd, 0, 0, token, 0);
+ }
+ }
+
+ /* by default, enable interrupt on urb completion */
+ if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
+ qtd->hw_token |= cpu_to_hc32(fusbh200, QTD_IOC);
+ return head;
+
+cleanup:
+ qtd_list_free (fusbh200, urb, head);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+// Would be best to create all qh's from config descriptors,
+// when each interface/altsetting is established. Unlink
+// any previous qh and cancel its urbs first; endpoints are
+// implicitly reset then (data toggle too).
+// That'd mean updating how usbcore talks to HCDs. (2.7?)
+
+
+/*
+ * Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled. For highspeed, that's
+ * just one microframe in the s-mask. For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct fusbh200_qh *
+qh_make (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ gfp_t flags
+) {
+ struct fusbh200_qh *qh = fusbh200_qh_alloc (fusbh200, flags);
+ u32 info1 = 0, info2 = 0;
+ int is_input, type;
+ int maxp = 0;
+ struct usb_tt *tt = urb->dev->tt;
+ struct fusbh200_qh_hw *hw;
+
+ if (!qh)
+ return qh;
+
+ /*
+ * init endpoint/device data for this QH
+ */
+ info1 |= usb_pipeendpoint (urb->pipe) << 8;
+ info1 |= usb_pipedevice (urb->pipe) << 0;
+
+ is_input = usb_pipein (urb->pipe);
+ type = usb_pipetype (urb->pipe);
+ maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
+
+ /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
+ * acts like up to 3KB, but is built from smaller packets.
+ */
+ if (max_packet(maxp) > 1024) {
+ fusbh200_dbg(fusbh200, "bogus qh maxpacket %d\n", max_packet(maxp));
+ goto done;
+ }
+
+ /* Compute interrupt scheduling parameters just once, and save.
+ * - allowing for high bandwidth, how many nsec/uframe are used?
+ * - split transactions need a second CSPLIT uframe; same question
+ * - splits also need a schedule gap (for full/low speed I/O)
+ * - qh has a polling interval
+ *
+ * For control/bulk requests, the HC or TT handles these.
+ */
+ if (type == PIPE_INTERRUPT) {
+ qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ is_input, 0,
+ hb_mult(maxp) * max_packet(maxp)));
+ qh->start = NO_FRAME;
+
+ if (urb->dev->speed == USB_SPEED_HIGH) {
+ qh->c_usecs = 0;
+ qh->gap_uf = 0;
+
+			qh->period = urb->interval >> 3;	/* uframes -> frames */
+ if (qh->period == 0 && urb->interval != 1) {
+ /* NOTE interval 2 or 4 uframes could work.
+ * But interval 1 scheduling is simpler, and
+ * includes high bandwidth.
+ */
+ urb->interval = 1;
+ } else if (qh->period > fusbh200->periodic_size) {
+ qh->period = fusbh200->periodic_size;
+ urb->interval = qh->period << 3;
+ }
+ } else {
+ int think_time;
+
+ /* gap is f(FS/LS transfer times) */
+ qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
+ is_input, 0, maxp) / (125 * 1000);
+
+ /* FIXME this just approximates SPLIT/CSPLIT times */
+ if (is_input) { // SPLIT, gap, CSPLIT+DATA
+ qh->c_usecs = qh->usecs + HS_USECS (0);
+ qh->usecs = HS_USECS (1);
+ } else { // SPLIT+DATA, gap, CSPLIT
+ qh->usecs += HS_USECS (1);
+ qh->c_usecs = HS_USECS (0);
+ }
+
+ think_time = tt ? tt->think_time : 0;
+ qh->tt_usecs = NS_TO_US (think_time +
+ usb_calc_bus_time (urb->dev->speed,
+ is_input, 0, max_packet (maxp)));
+ qh->period = urb->interval;
+ if (qh->period > fusbh200->periodic_size) {
+ qh->period = fusbh200->periodic_size;
+ urb->interval = qh->period;
+ }
+ }
+ }
+
+ /* support for tt scheduling, and access to toggles */
+ qh->dev = urb->dev;
+
+ /* using TT? */
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ info1 |= QH_LOW_SPEED;
+ /* FALL THROUGH */
+
+ case USB_SPEED_FULL:
+ /* EPS 0 means "full" */
+ if (type != PIPE_INTERRUPT)
+ info1 |= (FUSBH200_TUNE_RL_TT << 28);
+ if (type == PIPE_CONTROL) {
+ info1 |= QH_CONTROL_EP; /* for TT */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ }
+ info1 |= maxp << 16;
+
+ info2 |= (FUSBH200_TUNE_MULT_TT << 30);
+
+ /* Some Freescale processors have an erratum in which the
+		 * port number in the queue head is 0..N-1 instead of 1..N.
+ */
+ if (fusbh200_has_fsl_portno_bug(fusbh200))
+ info2 |= (urb->dev->ttport-1) << 23;
+ else
+ info2 |= urb->dev->ttport << 23;
+
+ /* set the address of the TT; for TDI's integrated
+ * root hub tt, leave it zeroed.
+ */
+ if (tt && tt->hub != fusbh200_to_hcd(fusbh200)->self.root_hub)
+ info2 |= tt->hub->devnum << 16;
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+ break;
+
+ case USB_SPEED_HIGH: /* no TT involved */
+ info1 |= QH_HIGH_SPEED;
+ if (type == PIPE_CONTROL) {
+ info1 |= (FUSBH200_TUNE_RL_HS << 28);
+ info1 |= 64 << 16; /* usb2 fixed maxpacket */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ info2 |= (FUSBH200_TUNE_MULT_HS << 30);
+ } else if (type == PIPE_BULK) {
+ info1 |= (FUSBH200_TUNE_RL_HS << 28);
+ /* The USB spec says that high speed bulk endpoints
+ * always use 512 byte maxpacket. But some device
+ * vendors decided to ignore that, and MSFT is happy
+ * to help them do so. So now people expect to use
+ * such nonconformant devices with Linux too; sigh.
+ */
+ info1 |= max_packet(maxp) << 16;
+ info2 |= (FUSBH200_TUNE_MULT_HS << 30);
+ } else { /* PIPE_INTERRUPT */
+ info1 |= max_packet (maxp) << 16;
+ info2 |= hb_mult (maxp) << 30;
+ }
+ break;
+ default:
+ fusbh200_dbg(fusbh200, "bogus dev %p speed %d\n", urb->dev,
+ urb->dev->speed);
+done:
+ qh_destroy(fusbh200, qh);
+ return NULL;
+ }
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+ /* init as live, toggle clear, advance to dummy */
+ qh->qh_state = QH_STATE_IDLE;
+ hw = qh->hw;
+ hw->hw_info1 = cpu_to_hc32(fusbh200, info1);
+ hw->hw_info2 = cpu_to_hc32(fusbh200, info2);
+ qh->is_out = !is_input;
+ usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
+ qh_refresh (fusbh200, qh);
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
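+/* The async schedule is usage counted: enable_async() starts it for the
+ * first queued QH, and disable_async() lets it be turned off again only
+ * once the last async QH is gone.
+ */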
+static void enable_async(struct fusbh200_hcd *fusbh200)
+{
+ if (fusbh200->async_count++)
+ return;
+
+ /* Stop waiting to turn off the async schedule */
+ fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_ASYNC);
+
+ /* Don't start the schedule until ASS is 0 */
+ fusbh200_poll_ASS(fusbh200);
+ turn_on_io_watchdog(fusbh200);
+}
+
+static void disable_async(struct fusbh200_hcd *fusbh200)
+{
+ if (--fusbh200->async_count)
+ return;
+
+ /* The async schedule and async_unlink list are supposed to be empty */
+ WARN_ON(fusbh200->async->qh_next.qh || fusbh200->async_unlink);
+
+ /* Don't turn off the schedule until ASS is 1 */
+ fusbh200_poll_ASS(fusbh200);
+}
+
+/* move qh (and its qtds) onto async queue; maybe enable queue. */
+
+static void qh_link_async (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ __hc32 dma = QH_NEXT(fusbh200, qh->qh_dma);
+ struct fusbh200_qh *head;
+
+ /* Don't link a QH if there's a Clear-TT-Buffer pending */
+ if (unlikely(qh->clearing_tt))
+ return;
+
+ WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
+ /* clear halt and/or toggle; and maybe recover from silicon quirk */
+ qh_refresh(fusbh200, qh);
+
+ /* splice right after start */
+ head = fusbh200->async;
+ qh->qh_next = head->qh_next;
+ qh->hw->hw_next = head->hw->hw_next;
+ wmb ();
+
+ head->qh_next.qh = qh;
+ head->hw->hw_next = dma;
+
+ qh->xacterrs = 0;
+ qh->qh_state = QH_STATE_LINKED;
+ /* qtd completions reported later by interrupt */
+
+ enable_async(fusbh200);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns null if it can't allocate a QH it needs to.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct fusbh200_qh *qh_append_tds (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ int epnum,
+ void **ptr
+)
+{
+ struct fusbh200_qh *qh = NULL;
+ __hc32 qh_addr_mask = cpu_to_hc32(fusbh200, 0x7f);
+
+ qh = (struct fusbh200_qh *) *ptr;
+ if (unlikely (qh == NULL)) {
+ /* can't sleep here, we have fusbh200->lock... */
+ qh = qh_make (fusbh200, urb, GFP_ATOMIC);
+ *ptr = qh;
+ }
+ if (likely (qh != NULL)) {
+ struct fusbh200_qtd *qtd;
+
+ if (unlikely (list_empty (qtd_list)))
+ qtd = NULL;
+ else
+ qtd = list_entry (qtd_list->next, struct fusbh200_qtd,
+ qtd_list);
+
+ /* control qh may need patching ... */
+ if (unlikely (epnum == 0)) {
+
+ /* usb_reset_device() briefly reverts to address 0 */
+ if (usb_pipedevice (urb->pipe) == 0)
+ qh->hw->hw_info1 &= ~qh_addr_mask;
+ }
+
+ /* just one way to queue requests: swap with the dummy qtd.
+ * only hc or qh_refresh() ever modify the overlay.
+ */
+ if (likely (qtd != NULL)) {
+ struct fusbh200_qtd *dummy;
+ dma_addr_t dma;
+ __hc32 token;
+
+ /* to avoid racing the HC, use the dummy td instead of
+ * the first td of our list (becomes new dummy). both
+ * tds stay deactivated until we're done, when the
+ * HC is allowed to fetch the old dummy (4.10.2).
+ */
+ token = qtd->hw_token;
+ qtd->hw_token = HALT_BIT(fusbh200);
+
+ dummy = qh->dummy;
+
+ dma = dummy->qtd_dma;
+ *dummy = *qtd;
+ dummy->qtd_dma = dma;
+
+ list_del (&qtd->qtd_list);
+ list_add (&dummy->qtd_list, qtd_list);
+ list_splice_tail(qtd_list, &qh->qtd_list);
+
+ fusbh200_qtd_init(fusbh200, qtd, qtd->qtd_dma);
+ qh->dummy = qtd;
+
+ /* hc must see the new dummy at list end */
+ dma = qtd->qtd_dma;
+ qtd = list_entry (qh->qtd_list.prev,
+ struct fusbh200_qtd, qtd_list);
+ qtd->hw_next = QTD_NEXT(fusbh200, dma);
+
+ /* let the hc process these next qtds */
+ wmb ();
+ dummy->hw_token = token;
+
+ urb->hcpriv = qh;
+ }
+ }
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+submit_async (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ int epnum;
+ unsigned long flags;
+ struct fusbh200_qh *qh = NULL;
+ int rc;
+
+ epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef FUSBH200_URB_TRACE
+ {
+ struct fusbh200_qtd *qtd;
+ qtd = list_entry(qtd_list->next, struct fusbh200_qtd, qtd_list);
+ fusbh200_dbg(fusbh200,
+ "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
+ }
+#endif
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
+ rc = -ESHUTDOWN;
+ goto done;
+ }
+ rc = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
+ if (unlikely(rc))
+ goto done;
+
+ qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ if (unlikely(qh == NULL)) {
+ usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ /* Control/bulk operations through TTs don't need scheduling,
+ * the HC and TT handle it when the TT has a buffer ready.
+ */
+ if (likely (qh->qh_state == QH_STATE_IDLE))
+ qh_link_async(fusbh200, qh);
+ done:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ if (unlikely (qh == NULL))
+ qtd_list_free (fusbh200, urb, qtd_list);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void single_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qh *prev;
+
+ /* Add to the end of the list of QHs waiting for the next IAAD */
+ qh->qh_state = QH_STATE_UNLINK;
+ if (fusbh200->async_unlink)
+ fusbh200->async_unlink_last->unlink_next = qh;
+ else
+ fusbh200->async_unlink = qh;
+ fusbh200->async_unlink_last = qh;
+
+ /* Unlink it from the schedule */
+ prev = fusbh200->async;
+ while (prev->qh_next.qh != qh)
+ prev = prev->qh_next.qh;
+
+ prev->hw->hw_next = qh->hw->hw_next;
+ prev->qh_next = qh->qh_next;
+ if (fusbh200->qh_scan_next == qh)
+ fusbh200->qh_scan_next = qh->qh_next.qh;
+}
+
+static void start_iaa_cycle(struct fusbh200_hcd *fusbh200, bool nested)
+{
+ /*
+ * Do nothing if an IAA cycle is already running or
+ * if one will be started shortly.
+ */
+ if (fusbh200->async_iaa || fusbh200->async_unlinking)
+ return;
+
+ /* Do all the waiting QHs at once */
+ fusbh200->async_iaa = fusbh200->async_unlink;
+ fusbh200->async_unlink = NULL;
+
+ /* If the controller isn't running, we don't have to wait for it */
+ if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING)) {
+ if (!nested) /* Avoid recursion */
+ end_unlink_async(fusbh200);
+
+ /* Otherwise start a new IAA cycle */
+ } else if (likely(fusbh200->rh_state == FUSBH200_RH_RUNNING)) {
+ /* Make sure the unlinks are all visible to the hardware */
+ wmb();
+
+ fusbh200_writel(fusbh200, fusbh200->command | CMD_IAAD,
+ &fusbh200->regs->command);
+ fusbh200_readl(fusbh200, &fusbh200->regs->command);
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IAA_WATCHDOG, true);
+ }
+}
+
+/* the async QHs for the qtds being unlinked are now gone from the HC */
+
+static void end_unlink_async(struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_qh *qh;
+
+ /* Process the idle QHs */
+ restart:
+ fusbh200->async_unlinking = true;
+ while (fusbh200->async_iaa) {
+ qh = fusbh200->async_iaa;
+ fusbh200->async_iaa = qh->unlink_next;
+ qh->unlink_next = NULL;
+
+ qh->qh_state = QH_STATE_IDLE;
+ qh->qh_next.qh = NULL;
+
+ qh_completions(fusbh200, qh);
+ if (!list_empty(&qh->qtd_list) &&
+ fusbh200->rh_state == FUSBH200_RH_RUNNING)
+ qh_link_async(fusbh200, qh);
+ disable_async(fusbh200);
+ }
+ fusbh200->async_unlinking = false;
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fusbh200->async_unlink) {
+ start_iaa_cycle(fusbh200, true);
+ if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING))
+ goto restart;
+ }
+}
+
+static void unlink_empty_async(struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_qh *qh, *next;
+ bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
+ bool check_unlinks_later = false;
+
+ /* Unlink all the async QHs that have been empty for a timer cycle */
+ next = fusbh200->async->qh_next.qh;
+ while (next) {
+ qh = next;
+ next = qh->qh_next.qh;
+
+ if (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED) {
+ if (!stopped && qh->unlink_cycle ==
+ fusbh200->async_unlink_cycle)
+ check_unlinks_later = true;
+ else
+ single_unlink_async(fusbh200, qh);
+ }
+ }
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fusbh200->async_unlink)
+ start_iaa_cycle(fusbh200, false);
+
+ /* QHs that haven't been empty for long enough will be handled later */
+ if (check_unlinks_later) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
+ ++fusbh200->async_unlink_cycle;
+ }
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own fusbh200->lock */
+
+static void start_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ /*
+ * If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ single_unlink_async(fusbh200, qh);
+ start_iaa_cycle(fusbh200, false);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_async (struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_qh *qh;
+ bool check_unlinks_later = false;
+
+ fusbh200->qh_scan_next = fusbh200->async->qh_next.qh;
+ while (fusbh200->qh_scan_next) {
+ qh = fusbh200->qh_scan_next;
+ fusbh200->qh_scan_next = qh->qh_next.qh;
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fusbh200->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fusbh200->qh_scan_next is adjusted
+ * in single_unlink_async().
+ */
+ temp = qh_completions(fusbh200, qh);
+ if (qh->needs_rescan) {
+ start_unlink_async(fusbh200, qh);
+ } else if (list_empty(&qh->qtd_list)
+ && qh->qh_state == QH_STATE_LINKED) {
+ qh->unlink_cycle = fusbh200->async_unlink_cycle;
+ check_unlinks_later = true;
+ } else if (temp != 0)
+ goto rescan;
+ }
+ }
+
+ /*
+ * Unlink empty entries, reducing DMA usage as well
+ * as HCD schedule-scanning costs. Delay for any qh
+ * we just scanned; it's not unusual for it to become
+ * busy again soon.
+ */
+ if (check_unlinks_later && fusbh200->rh_state == FUSBH200_RH_RUNNING &&
+ !(fusbh200->enabled_hrtimer_events &
+ BIT(FUSBH200_HRTIMER_ASYNC_UNLINKS))) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
+ ++fusbh200->async_unlink_cycle;
+ }
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI scheduled transaction support: interrupt, iso, split iso
+ * These are called "periodic" transactions in the EHCI spec.
+ *
+ * Note that for interrupt transfers, the QH/QTD manipulation is shared
+ * with the "asynchronous" transaction support (control/bulk transfers).
+ * The only real difference is in how interrupt transfers are scheduled.
+ *
+ * For ISO, we make an "iso_stream" head to serve the same role as a QH.
+ * It keeps track of every ITD (or SITD) that's linked, and holds enough
+ * pre-calculated schedule data to make appending to the queue be quick.
+ */
+
+static int fusbh200_get_frame (struct usb_hcd *hcd);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd
+ * @tag: hardware tag for type of this record
+ */
+static union fusbh200_shadow *
+periodic_next_shadow(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
+ __hc32 tag)
+{
+ switch (hc32_to_cpu(fusbh200, tag)) {
+ case Q_TYPE_QH:
+ return &periodic->qh->qh_next;
+ case Q_TYPE_FSTN:
+ return &periodic->fstn->fstn_next;
+ default:
+ return &periodic->itd->itd_next;
+ }
+}
+
+static __hc32 *
+shadow_next_periodic(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
+ __hc32 tag)
+{
+ switch (hc32_to_cpu(fusbh200, tag)) {
+ /* our fusbh200_shadow.qh is actually software part */
+ case Q_TYPE_QH:
+ return &periodic->qh->hw->hw_next;
+ /* others are hw parts */
+ default:
+ return periodic->hw_next;
+ }
+}
+
+/* caller must hold fusbh200->lock */
+static void periodic_unlink (struct fusbh200_hcd *fusbh200, unsigned frame, void *ptr)
+{
+ union fusbh200_shadow *prev_p = &fusbh200->pshadow[frame];
+ __hc32 *hw_p = &fusbh200->periodic[frame];
+ union fusbh200_shadow here = *prev_p;
+
+ /* find predecessor of "ptr"; hw and shadow lists are in sync */
+ while (here.ptr && here.ptr != ptr) {
+ prev_p = periodic_next_shadow(fusbh200, prev_p,
+ Q_NEXT_TYPE(fusbh200, *hw_p));
+ hw_p = shadow_next_periodic(fusbh200, &here,
+ Q_NEXT_TYPE(fusbh200, *hw_p));
+ here = *prev_p;
+ }
+ /* an interrupt entry (at list end) could have been shared */
+ if (!here.ptr)
+ return;
+
+ /* update shadow and hardware lists ... the old "next" pointers
+ * from ptr may still be in use, the caller updates them.
+ */
+ *prev_p = *periodic_next_shadow(fusbh200, &here,
+ Q_NEXT_TYPE(fusbh200, *hw_p));
+
+ *hw_p = *shadow_next_periodic(fusbh200, &here,
+ Q_NEXT_TYPE(fusbh200, *hw_p));
+}
+
+/* how many of the uframe's 125 usecs are allocated? */
+static unsigned short
+periodic_usecs (struct fusbh200_hcd *fusbh200, unsigned frame, unsigned uframe)
+{
+ __hc32 *hw_p = &fusbh200->periodic [frame];
+ union fusbh200_shadow *q = &fusbh200->pshadow [frame];
+ unsigned usecs = 0;
+ struct fusbh200_qh_hw *hw;
+
+ while (q->ptr) {
+ switch (hc32_to_cpu(fusbh200, Q_NEXT_TYPE(fusbh200, *hw_p))) {
+ case Q_TYPE_QH:
+ hw = q->qh->hw;
+ /* is it in the S-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fusbh200, 1 << uframe))
+ usecs += q->qh->usecs;
+ /* ... or C-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fusbh200,
+ 1 << (8 + uframe)))
+ usecs += q->qh->c_usecs;
+ hw_p = &hw->hw_next;
+ q = &q->qh->qh_next;
+ break;
+ // case Q_TYPE_FSTN:
+ default:
+ /* for "save place" FSTNs, count the relevant INTR
+ * bandwidth from the previous frame
+ */
+ if (q->fstn->hw_prev != FUSBH200_LIST_END(fusbh200)) {
+ fusbh200_dbg (fusbh200, "ignoring FSTN cost ...\n");
+ }
+ hw_p = &q->fstn->hw_next;
+ q = &q->fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ if (q->itd->hw_transaction[uframe])
+ usecs += q->itd->stream->usecs;
+ hw_p = &q->itd->hw_next;
+ q = &q->itd->itd_next;
+ break;
+ }
+ }
+ if (usecs > fusbh200->uframe_periodic_max)
+ fusbh200_err (fusbh200, "uframe %d sched overrun: %d usecs\n",
+ frame * 8 + uframe, usecs);
+ return usecs;
+}
+
+/*-------------------------------------------------------------------------*/
+
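+/* Do the two devices reach full/low speed transfers through the same
+ * transaction translator?  A multi-TT hub has one TT per downstream port.
+ */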
+static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
+{
+ if (!dev1->tt || !dev2->tt)
+ return 0;
+ if (dev1->tt != dev2->tt)
+ return 0;
+ if (dev1->tt->multi)
+ return dev1->ttport == dev2->ttport;
+ else
+ return 1;
+}
+
+/* return true iff the device's transaction translator is available
+ * for a periodic transfer starting at the specified frame, using
+ * all the uframes in the mask.
+ */
+static int tt_no_collision (
+ struct fusbh200_hcd *fusbh200,
+ unsigned period,
+ struct usb_device *dev,
+ unsigned frame,
+ u32 uf_mask
+)
+{
+ if (period == 0) /* error */
+ return 0;
+
+ /* note bandwidth wastage: split never follows csplit
+ * (different dev or endpoint) until the next uframe.
+ * calling convention doesn't make that distinction.
+ */
+ for (; frame < fusbh200->periodic_size; frame += period) {
+ union fusbh200_shadow here;
+ __hc32 type;
+ struct fusbh200_qh_hw *hw;
+
+ here = fusbh200->pshadow [frame];
+ type = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [frame]);
+ while (here.ptr) {
+ switch (hc32_to_cpu(fusbh200, type)) {
+ case Q_TYPE_ITD:
+ type = Q_NEXT_TYPE(fusbh200, here.itd->hw_next);
+ here = here.itd->itd_next;
+ continue;
+ case Q_TYPE_QH:
+ hw = here.qh->hw;
+ if (same_tt (dev, here.qh->dev)) {
+ u32 mask;
+
+ mask = hc32_to_cpu(fusbh200,
+ hw->hw_info2);
+ /* "knows" no gap is needed */
+ mask |= mask >> 8;
+ if (mask & uf_mask)
+ break;
+ }
+ type = Q_NEXT_TYPE(fusbh200, hw->hw_next);
+ here = here.qh->qh_next;
+ continue;
+ // case Q_TYPE_FSTN:
+ default:
+ fusbh200_dbg (fusbh200,
+ "periodic frame %d bogus type %d\n",
+ frame, type);
+ }
+
+ /* collision or error */
+ return 0;
+ }
+ }
+
+ /* no collision */
+ return 1;
+}
+
+/*-------------------------------------------------------------------------*/
+
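+/* Usage counting for the periodic schedule, mirroring enable_async() and
+ * disable_async() above.
+ */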
+static void enable_periodic(struct fusbh200_hcd *fusbh200)
+{
+ if (fusbh200->periodic_count++)
+ return;
+
+ /* Stop waiting to turn off the periodic schedule */
+ fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_PERIODIC);
+
+ /* Don't start the schedule until PSS is 0 */
+ fusbh200_poll_PSS(fusbh200);
+ turn_on_io_watchdog(fusbh200);
+}
+
+static void disable_periodic(struct fusbh200_hcd *fusbh200)
+{
+ if (--fusbh200->periodic_count)
+ return;
+
+ /* Don't turn off the schedule until PSS is 1 */
+ fusbh200_poll_PSS(fusbh200);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; fusbh200 0.96+)
+ */
+static void qh_link_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ unsigned i;
+ unsigned period = qh->period;
+
+ dev_dbg (&qh->dev->dev,
+ "link qh%d-%04x/%p start %d [%d/%d us]\n",
+ period, hc32_to_cpup(fusbh200, &qh->hw->hw_info2)
+ & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* high bandwidth, or otherwise every microframe */
+ if (period == 0)
+ period = 1;
+
+ for (i = qh->start; i < fusbh200->periodic_size; i += period) {
+ union fusbh200_shadow *prev = &fusbh200->pshadow[i];
+ __hc32 *hw_p = &fusbh200->periodic[i];
+ union fusbh200_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip the iso nodes at list head */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fusbh200, *hw_p);
+ if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fusbh200, prev, type);
+ hw_p = shadow_next_periodic(fusbh200, &here, type);
+ here = *prev;
+ }
+
+ /* sorting each branch by period (slow-->fast)
+ * enables sharing interior tree nodes
+ */
+ while (here.ptr && qh != here.qh) {
+ if (qh->period > here.qh->period)
+ break;
+ prev = &here.qh->qh_next;
+ hw_p = &here.qh->hw->hw_next;
+ here = *prev;
+ }
+ /* link in this qh, unless some earlier pass did that */
+ if (qh != here.qh) {
+ qh->qh_next = here;
+ if (here.qh)
+ qh->hw->hw_next = *hw_p;
+ wmb ();
+ prev->qh = qh;
+ *hw_p = QH_NEXT (fusbh200, qh->qh_dma);
+ }
+ }
+ qh->qh_state = QH_STATE_LINKED;
+ qh->xacterrs = 0;
+
+ /* update per-qh bandwidth for usbfs */
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated += qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ list_add(&qh->intr_node, &fusbh200->intr_qh_list);
+
+ /* maybe enable periodic schedule processing */
+ ++fusbh200->intr_count;
+ enable_periodic(fusbh200);
+}
+
+static void qh_unlink_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ unsigned i;
+ unsigned period;
+
+ /*
+ * If qh is for a low/full-speed device, simply unlinking it
+ * could interfere with an ongoing split transaction. To unlink
+ * it safely would require setting the QH_INACTIVATE bit and
+ * waiting at least one frame, as described in EHCI 4.12.2.5.
+ *
+ * We won't bother with any of this. Instead, we assume that the
+ * only reason for unlinking an interrupt QH while the current URB
+ * is still active is to dequeue all the URBs (flush the whole
+ * endpoint queue).
+ *
+ * If rebalancing the periodic schedule is ever implemented, this
+ * approach will no longer be valid.
+ */
+
+ /* high bandwidth, or otherwise part of every microframe */
+ if ((period = qh->period) == 0)
+ period = 1;
+
+ for (i = qh->start; i < fusbh200->periodic_size; i += period)
+ periodic_unlink (fusbh200, i, qh);
+
+ /* update per-qh bandwidth for usbfs */
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated -= qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ dev_dbg (&qh->dev->dev,
+ "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ qh->period,
+ hc32_to_cpup(fusbh200, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* qh->qh_next still "live" to HC */
+ qh->qh_state = QH_STATE_UNLINK;
+ qh->qh_next.ptr = NULL;
+
+ if (fusbh200->qh_scan_next == qh)
+ fusbh200->qh_scan_next = list_entry(qh->intr_node.next,
+ struct fusbh200_qh, intr_node);
+ list_del(&qh->intr_node);
+}
+
+static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ /* If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ qh_unlink_periodic (fusbh200, qh);
+
+ /* Make sure the unlinks are visible before starting the timer */
+ wmb();
+
+ /*
+ * The EHCI spec doesn't say how long it takes the controller to
+ * stop accessing an unlinked interrupt QH. The timer delay is
+ * 9 uframes; presumably that will be long enough.
+ */
+ qh->unlink_cycle = fusbh200->intr_unlink_cycle;
+
+ /* New entries go at the end of the intr_unlink list */
+ if (fusbh200->intr_unlink)
+ fusbh200->intr_unlink_last->unlink_next = qh;
+ else
+ fusbh200->intr_unlink = qh;
+ fusbh200->intr_unlink_last = qh;
+
+ if (fusbh200->intr_unlinking)
+ ; /* Avoid recursive calls */
+ else if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+ fusbh200_handle_intr_unlinks(fusbh200);
+ else if (fusbh200->intr_unlink == qh) {
+ fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
+ ++fusbh200->intr_unlink_cycle;
+ }
+}
+
+static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ struct fusbh200_qh_hw *hw = qh->hw;
+ int rc;
+
+ qh->qh_state = QH_STATE_IDLE;
+ hw->hw_next = FUSBH200_LIST_END(fusbh200);
+
+ qh_completions(fusbh200, qh);
+
+ /* reschedule QH iff another request is queued */
+ if (!list_empty(&qh->qtd_list) && fusbh200->rh_state == FUSBH200_RH_RUNNING) {
+ rc = qh_schedule(fusbh200, qh);
+
+ /* An error here likely indicates handshake failure
+ * or no space left in the schedule. Neither fault
+ * should happen often ...
+ *
+ * FIXME kill the now-dysfunctional queued urbs
+ */
+ if (rc != 0)
+ fusbh200_err(fusbh200, "can't reschedule qh %p, err %d\n",
+ qh, rc);
+ }
+
+ /* maybe turn off periodic schedule */
+ --fusbh200->intr_count;
+ disable_periodic(fusbh200);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int check_period (
+ struct fusbh200_hcd *fusbh200,
+ unsigned frame,
+ unsigned uframe,
+ unsigned period,
+ unsigned usecs
+) {
+ int claimed;
+
+ /* complete split running into next frame?
+ * given FSTN support, we could sometimes check...
+ */
+ if (uframe >= 8)
+ return 0;
+
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = fusbh200->uframe_periodic_max - usecs;
+
+ /* we "know" 2 and 4 uframe intervals were rejected; so
+ * for period 0, check _every_ microframe in the schedule.
+ */
+ if (unlikely (period == 0)) {
+ do {
+ for (uframe = 0; uframe < 7; uframe++) {
+ claimed = periodic_usecs (fusbh200, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ }
+ } while ((frame += 1) < fusbh200->periodic_size);
+
+ /* just check the specified uframe, at that period */
+ } else {
+ do {
+ claimed = periodic_usecs (fusbh200, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ } while ((frame += period) < fusbh200->periodic_size);
+ }
+
+ // success!
+ return 1;
+}
+
+static int check_intr_schedule (
+ struct fusbh200_hcd *fusbh200,
+ unsigned frame,
+ unsigned uframe,
+ const struct fusbh200_qh *qh,
+ __hc32 *c_maskp
+)
+{
+ int retval = -ENOSPC;
+ u8 mask = 0;
+
+ if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ goto done;
+
+ if (!check_period (fusbh200, frame, uframe, qh->period, qh->usecs))
+ goto done;
+ if (!qh->c_usecs) {
+ retval = 0;
+ *c_maskp = 0;
+ goto done;
+ }
+
+ /* Make sure this tt's buffer is also available for CSPLITs.
+ * We pessimize a bit; probably the typical full speed case
+ * doesn't need the second CSPLIT.
+ *
+ * NOTE: both SPLIT and CSPLIT could be checked in just
+ * one smart pass...
+ */
+ mask = 0x03 << (uframe + qh->gap_uf);
+ *c_maskp = cpu_to_hc32(fusbh200, mask << 8);
+
+ mask |= 1 << uframe;
+ if (tt_no_collision (fusbh200, qh->period, qh->dev, frame, mask)) {
+ if (!check_period (fusbh200, frame, uframe + qh->gap_uf + 1,
+ qh->period, qh->c_usecs))
+ goto done;
+ if (!check_period (fusbh200, frame, uframe + qh->gap_uf,
+ qh->period, qh->c_usecs))
+ goto done;
+ retval = 0;
+ }
+done:
+ return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+ int status;
+ unsigned uframe;
+ __hc32 c_mask;
+ unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ struct fusbh200_qh_hw *hw = qh->hw;
+
+ qh_refresh(fusbh200, qh);
+ hw->hw_next = FUSBH200_LIST_END(fusbh200);
+ frame = qh->start;
+
+ /* reuse the previous schedule slots, if we can */
+ if (frame < qh->period) {
+ uframe = ffs(hc32_to_cpup(fusbh200, &hw->hw_info2) & QH_SMASK);
+ status = check_intr_schedule (fusbh200, frame, --uframe,
+ qh, &c_mask);
+ } else {
+ uframe = 0;
+ c_mask = 0;
+ status = -ENOSPC;
+ }
+
+ /* else scan the schedule to find a group of slots such that all
+ * uframes have enough periodic bandwidth available.
+ */
+ if (status) {
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->period) {
+ int i;
+
+ for (i = qh->period; status && i > 0; --i) {
+ frame = ++fusbh200->random_frame % qh->period;
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule (fusbh200,
+ frame, uframe, qh,
+ &c_mask);
+ if (status == 0)
+ break;
+ }
+ }
+
+ /* qh->period == 0 means every uframe */
+ } else {
+ frame = 0;
+ status = check_intr_schedule (fusbh200, 0, 0, qh, &c_mask);
+ }
+ if (status)
+ goto done;
+ qh->start = frame;
+
+ /* reset S-frame and (maybe) C-frame masks */
+ hw->hw_info2 &= cpu_to_hc32(fusbh200, ~(QH_CMASK | QH_SMASK));
+ hw->hw_info2 |= qh->period
+ ? cpu_to_hc32(fusbh200, 1 << uframe)
+ : cpu_to_hc32(fusbh200, QH_SMASK);
+ hw->hw_info2 |= c_mask;
+ } else
+ fusbh200_dbg (fusbh200, "reused qh %p schedule\n", qh);
+
+ /* stuff into the periodic schedule */
+ qh_link_periodic(fusbh200, qh);
+done:
+ return status;
+}
+
+static int intr_submit (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ unsigned epnum;
+ unsigned long flags;
+ struct fusbh200_qh *qh;
+ int status;
+ struct list_head empty;
+
+ /* get endpoint and transfer/schedule data */
+ epnum = urb->ep->desc.bEndpointAddress;
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+
+ if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+
+ /* get qh and force any scheduling errors */
+ INIT_LIST_HEAD (&empty);
+ qh = qh_append_tds(fusbh200, urb, &empty, epnum, &urb->ep->hcpriv);
+ if (qh == NULL) {
+ status = -ENOMEM;
+ goto done;
+ }
+ if (qh->qh_state == QH_STATE_IDLE) {
+ if ((status = qh_schedule (fusbh200, qh)) != 0)
+ goto done;
+ }
+
+ /* then queue the urb's tds to the qh */
+ qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ BUG_ON (qh == NULL);
+
+ /* ... update usbfs periodic stats */
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs++;
+
+done:
+ if (unlikely(status))
+ usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+done_not_linked:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ if (status)
+ qtd_list_free (fusbh200, urb, qtd_list);
+
+ return status;
+}
+
+static void scan_intr(struct fusbh200_hcd *fusbh200)
+{
+ struct fusbh200_qh *qh;
+
+ list_for_each_entry_safe(qh, fusbh200->qh_scan_next, &fusbh200->intr_qh_list,
+ intr_node) {
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fusbh200->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fusbh200->qh_scan_next is adjusted
+ * in qh_unlink_periodic().
+ */
+ temp = qh_completions(fusbh200, qh);
+ if (unlikely(qh->needs_rescan ||
+ (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED)))
+ start_unlink_intr(fusbh200, qh);
+ else if (temp != 0)
+ goto rescan;
+ }
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fusbh200_iso_stream ops work with both ITD and SITD */
+
+static struct fusbh200_iso_stream *
+iso_stream_alloc (gfp_t mem_flags)
+{
+ struct fusbh200_iso_stream *stream;
+
+ stream = kzalloc(sizeof *stream, mem_flags);
+ if (likely (stream != NULL)) {
+ INIT_LIST_HEAD(&stream->td_list);
+ INIT_LIST_HEAD(&stream->free_list);
+ stream->next_uframe = -1;
+ }
+ return stream;
+}
+
+static void
+iso_stream_init (
+ struct fusbh200_hcd *fusbh200,
+ struct fusbh200_iso_stream *stream,
+ struct usb_device *dev,
+ int pipe,
+ unsigned interval
+)
+{
+ u32 buf1;
+ unsigned epnum, maxp;
+ int is_input;
+ long bandwidth;
+ unsigned multi;
+
+ /*
+ * this might be a "high bandwidth" highspeed endpoint,
+ * as encoded in the ep descriptor's wMaxPacket field
+ */
+ epnum = usb_pipeendpoint (pipe);
+ is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
+ maxp = usb_maxpacket(dev, pipe, !is_input);
+ if (is_input) {
+ buf1 = (1 << 11);
+ } else {
+ buf1 = 0;
+ }
+
+ maxp = max_packet(maxp);
+ multi = hb_mult(maxp);
+ buf1 |= maxp;
+ maxp *= multi;
+
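+	/* Precompute the low 12 bits of iTD buffer page pointers 0..2:
+	 * page 0 carries the device address and endpoint number, page 1 the
+	 * direction bit and max packet size, page 2 the Multi count.
+	 */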
+ stream->buf0 = cpu_to_hc32(fusbh200, (epnum << 8) | dev->devnum);
+ stream->buf1 = cpu_to_hc32(fusbh200, buf1);
+ stream->buf2 = cpu_to_hc32(fusbh200, multi);
+
+ /* usbfs wants to report the average usecs per frame tied up
+ * when transfers on this endpoint are scheduled ...
+ */
+ if (dev->speed == USB_SPEED_FULL) {
+ interval <<= 3;
+ stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
+ is_input, 1, maxp));
+ stream->usecs /= 8;
+ } else {
+ stream->highspeed = 1;
+ stream->usecs = HS_USECS_ISO (maxp);
+ }
+ bandwidth = stream->usecs * 8;
+ bandwidth /= interval;
+
+ stream->bandwidth = bandwidth;
+ stream->udev = dev;
+ stream->bEndpointAddress = is_input | epnum;
+ stream->interval = interval;
+ stream->maxp = maxp;
+}
+
+static struct fusbh200_iso_stream *
+iso_stream_find (struct fusbh200_hcd *fusbh200, struct urb *urb)
+{
+ unsigned epnum;
+ struct fusbh200_iso_stream *stream;
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+
+ epnum = usb_pipeendpoint (urb->pipe);
+ if (usb_pipein(urb->pipe))
+ ep = urb->dev->ep_in[epnum];
+ else
+ ep = urb->dev->ep_out[epnum];
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ stream = ep->hcpriv;
+
+ if (unlikely (stream == NULL)) {
+ stream = iso_stream_alloc(GFP_ATOMIC);
+ if (likely (stream != NULL)) {
+ ep->hcpriv = stream;
+ stream->ep = ep;
+ iso_stream_init(fusbh200, stream, urb->dev, urb->pipe,
+ urb->interval);
+ }
+
+ /* if dev->ep [epnum] is a QH, hw is set */
+ } else if (unlikely (stream->hw != NULL)) {
+ fusbh200_dbg (fusbh200, "dev %s ep%d%s, not iso??\n",
+ urb->dev->devpath, epnum,
+ usb_pipein(urb->pipe) ? "in" : "out");
+ stream = NULL;
+ }
+
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return stream;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fusbh200_iso_sched ops can be ITD-only or SITD-only */
+
+static struct fusbh200_iso_sched *
+iso_sched_alloc (unsigned packets, gfp_t mem_flags)
+{
+ struct fusbh200_iso_sched *iso_sched;
+ int size = sizeof *iso_sched;
+
+ size += packets * sizeof (struct fusbh200_iso_packet);
+ iso_sched = kzalloc(size, mem_flags);
+ if (likely (iso_sched != NULL)) {
+ INIT_LIST_HEAD (&iso_sched->td_list);
+ }
+ return iso_sched;
+}
+
+static inline void
+itd_sched_init(
+ struct fusbh200_hcd *fusbh200,
+ struct fusbh200_iso_sched *iso_sched,
+ struct fusbh200_iso_stream *stream,
+ struct urb *urb
+)
+{
+ unsigned i;
+ dma_addr_t dma = urb->transfer_dma;
+
+ /* how many uframes are needed for these transfers */
+ iso_sched->span = urb->number_of_packets * stream->interval;
+
+ /* figure out per-uframe itd fields that we'll need later
+ * when we fit new itds into the schedule.
+ */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ struct fusbh200_iso_packet *uframe = &iso_sched->packet [i];
+ unsigned length;
+ dma_addr_t buf;
+ u32 trans;
+
+ length = urb->iso_frame_desc [i].length;
+ buf = dma + urb->iso_frame_desc [i].offset;
+
+ trans = FUSBH200_ISOC_ACTIVE;
+ trans |= buf & 0x0fff;
+ if (unlikely (((i + 1) == urb->number_of_packets))
+ && !(urb->transfer_flags & URB_NO_INTERRUPT))
+ trans |= FUSBH200_ITD_IOC;
+ trans |= length << 16;
+ uframe->transaction = cpu_to_hc32(fusbh200, trans);
+
+ /* might need to cross a buffer page within a uframe */
+ uframe->bufp = (buf & ~(u64)0x0fff);
+ buf += length;
+ if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
+ uframe->cross = 1;
+ }
+}
+
+static void
+iso_sched_free (
+ struct fusbh200_iso_stream *stream,
+ struct fusbh200_iso_sched *iso_sched
+)
+{
+ if (!iso_sched)
+ return;
+ // caller must hold fusbh200->lock!
+ list_splice (&iso_sched->td_list, &stream->free_list);
+ kfree (iso_sched);
+}
+
+static int
+itd_urb_transaction (
+ struct fusbh200_iso_stream *stream,
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ gfp_t mem_flags
+)
+{
+ struct fusbh200_itd *itd;
+ dma_addr_t itd_dma;
+ int i;
+ unsigned num_itds;
+ struct fusbh200_iso_sched *sched;
+ unsigned long flags;
+
+ sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
+ if (unlikely (sched == NULL))
+ return -ENOMEM;
+
+ itd_sched_init(fusbh200, sched, stream, urb);
+
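+	/* Each iTD describes one frame (8 uframes) worth of transactions.
+	 * With interval < 8 several packets share a frame; otherwise every
+	 * packet needs its own iTD.
+	 */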
+ if (urb->interval < 8)
+ num_itds = 1 + (sched->span + 7) / 8;
+ else
+ num_itds = urb->number_of_packets;
+
+ /* allocate/init ITDs */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ for (i = 0; i < num_itds; i++) {
+
+ /*
+ * Use iTDs from the free list, but not iTDs that may
+ * still be in use by the hardware.
+ */
+ if (likely(!list_empty(&stream->free_list))) {
+ itd = list_first_entry(&stream->free_list,
+ struct fusbh200_itd, itd_list);
+ if (itd->frame == fusbh200->now_frame)
+ goto alloc_itd;
+ list_del (&itd->itd_list);
+ itd_dma = itd->itd_dma;
+ } else {
+ alloc_itd:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ itd = dma_pool_alloc (fusbh200->itd_pool, mem_flags,
+ &itd_dma);
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ if (!itd) {
+ iso_sched_free(stream, sched);
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ memset (itd, 0, sizeof *itd);
+ itd->itd_dma = itd_dma;
+ list_add (&itd->itd_list, &sched->td_list);
+ }
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+
+ /* temporarily store schedule info in hcpriv */
+ urb->hcpriv = sched;
+ urb->error_count = 0;
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
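+/* Would claiming "usecs" more in every "period"-th uframe of the ring
+ * (of "mod" uframes) still stay within the uframe_periodic_max budget?
+ */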
+static inline int
+itd_slot_ok (
+ struct fusbh200_hcd *fusbh200,
+ u32 mod,
+ u32 uframe,
+ u8 usecs,
+ u32 period
+)
+{
+ uframe %= period;
+ do {
+ /* can't commit more than uframe_periodic_max usec */
+ if (periodic_usecs (fusbh200, uframe >> 3, uframe & 0x7)
+ > (fusbh200->uframe_periodic_max - usecs))
+ return 0;
+
+ /* we know urb->interval is 2^N uframes */
+ uframe += period;
+ } while (uframe < mod);
+ return 1;
+}
+
+/*
+ * This scheduler plans almost as far into the future as it has actual
+ * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
+ * "as small as possible" to be cache-friendlier.) That limits the size
+ * transfers you can stream reliably; avoid more than 64 msec per urb.
+ * Also avoid queue depths of less than fusbh200's worst irq latency (affected
+ * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
+ * and other factors); or more than about 230 msec total (for portability,
+ * given FUSBH200_TUNE_FLS and the slop). Or, write a smarter scheduler!
+ */
+
+#define SCHEDULE_SLOP 80 /* microframes (10 msec) */
+
+static int
+iso_stream_schedule (
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ struct fusbh200_iso_stream *stream
+)
+{
+ u32 now, next, start, period, span;
+ int status;
+ unsigned mod = fusbh200->periodic_size << 3;
+ struct fusbh200_iso_sched *sched = urb->hcpriv;
+
+ period = urb->interval;
+ span = sched->span;
+
+ if (span > mod - SCHEDULE_SLOP) {
+ fusbh200_dbg (fusbh200, "iso request %p too long\n", urb);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ now = fusbh200_read_frame_index(fusbh200) & (mod - 1);
+
+ /* Typical case: reuse current schedule, stream is still active.
+ * Hopefully there are no gaps from the host falling behind
+ * (irq delays etc), but if there are we'll take the next
+ * slot in the schedule, implicitly assuming URB_ISO_ASAP.
+ */
+ if (likely (!list_empty (&stream->td_list))) {
+ u32 excess;
+
+ /* For high speed devices, allow scheduling within the
+ * isochronous scheduling threshold. For full speed devices
+		 * and Intel PCI-based controllers, don't (workaround for
+ * Intel ICH9 bug).
+ */
+ if (!stream->highspeed && fusbh200->fs_i_thresh)
+ next = now + fusbh200->i_thresh;
+ else
+ next = now;
+
+ /* Fell behind (by up to twice the slop amount)?
+ * We decide based on the time of the last currently-scheduled
+ * slot, not the time of the next available slot.
+ */
+ excess = (stream->next_uframe - period - next) & (mod - 1);
+ if (excess >= mod - 2 * SCHEDULE_SLOP)
+ start = next + excess - mod + period *
+ DIV_ROUND_UP(mod - excess, period);
+ else
+ start = next + excess + period;
+ if (start - now >= mod) {
+ fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now - period, period,
+ mod);
+ status = -EFBIG;
+ goto fail;
+ }
+ }
+
+ /* need to schedule; when's the next (u)frame we could start?
+ * this is bigger than fusbh200->i_thresh allows; scheduling itself
+ * isn't free, the slop should handle reasonably slow cpus. it
+ * can also help high bandwidth if the dma and irq loads don't
+ * jump until after the queue is primed.
+ */
+ else {
+ int done = 0;
+ start = SCHEDULE_SLOP + (now & ~0x07);
+
+ /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
+
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
+ */
+ next = start;
+ start += period;
+ do {
+ start--;
+ /* check schedule: enough space? */
+ if (itd_slot_ok(fusbh200, mod, start,
+ stream->usecs, period))
+ done = 1;
+ } while (start > next && !done);
+
+ /* no room in the schedule */
+ if (!done) {
+ fusbh200_dbg(fusbh200, "iso resched full %p (now %d max %d)\n",
+ urb, now, now + mod);
+ status = -ENOSPC;
+ goto fail;
+ }
+ }
+
+ /* Tried to schedule too far into the future? */
+ if (unlikely(start - now + span - period
+ >= mod - 2 * SCHEDULE_SLOP)) {
+ fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now, span - period,
+ mod - 2 * SCHEDULE_SLOP);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ stream->next_uframe = start & (mod - 1);
+
+ /* report high speed start in uframes; full speed, in frames */
+ urb->start_frame = stream->next_uframe;
+ if (!stream->highspeed)
+ urb->start_frame >>= 3;
+
+ /* Make sure scan_isoc() sees these */
+ if (fusbh200->isoc_count == 0)
+ fusbh200->next_frame = now >> 3;
+ return 0;
+
+ fail:
+ iso_sched_free(stream, sched);
+ urb->hcpriv = NULL;
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+itd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_iso_stream *stream,
+ struct fusbh200_itd *itd)
+{
+ int i;
+
+ /* it's been recently zeroed */
+ itd->hw_next = FUSBH200_LIST_END(fusbh200);
+ itd->hw_bufp [0] = stream->buf0;
+ itd->hw_bufp [1] = stream->buf1;
+ itd->hw_bufp [2] = stream->buf2;
+
+ for (i = 0; i < 8; i++)
+ itd->index[i] = -1;
+
+ /* All other fields are filled when scheduling */
+}
+
+static inline void
+itd_patch(
+ struct fusbh200_hcd *fusbh200,
+ struct fusbh200_itd *itd,
+ struct fusbh200_iso_sched *iso_sched,
+ unsigned index,
+ u16 uframe
+)
+{
+ struct fusbh200_iso_packet *uf = &iso_sched->packet [index];
+ unsigned pg = itd->pg;
+
+ // BUG_ON (pg == 6 && uf->cross);
+
+ uframe &= 0x07;
+ itd->index [uframe] = index;
+
+ itd->hw_transaction[uframe] = uf->transaction;
+ itd->hw_transaction[uframe] |= cpu_to_hc32(fusbh200, pg << 12);
+ itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, uf->bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(uf->bufp >> 32));
+
+ /* iso_frame_desc[].offset must be strictly increasing */
+ if (unlikely (uf->cross)) {
+ u64 bufp = uf->bufp + 4096;
+
+ itd->pg = ++pg;
+ itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(bufp >> 32));
+ }
+}
+
+static inline void
+itd_link (struct fusbh200_hcd *fusbh200, unsigned frame, struct fusbh200_itd *itd)
+{
+ union fusbh200_shadow *prev = &fusbh200->pshadow[frame];
+ __hc32 *hw_p = &fusbh200->periodic[frame];
+ union fusbh200_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip any iso nodes which might belong to previous microframes */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fusbh200, *hw_p);
+ if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fusbh200, prev, type);
+ hw_p = shadow_next_periodic(fusbh200, &here, type);
+ here = *prev;
+ }
+
+ itd->itd_next = here;
+ itd->hw_next = *hw_p;
+ prev->itd = itd;
+ itd->frame = frame;
+ wmb ();
+ *hw_p = cpu_to_hc32(fusbh200, itd->itd_dma | Q_TYPE_ITD);
+}
+
+/* fit urb's itds into the selected schedule slot; activate as needed */
+static void itd_link_urb(
+ struct fusbh200_hcd *fusbh200,
+ struct urb *urb,
+ unsigned mod,
+ struct fusbh200_iso_stream *stream
+)
+{
+ int packet;
+ unsigned next_uframe, uframe, frame;
+ struct fusbh200_iso_sched *iso_sched = urb->hcpriv;
+ struct fusbh200_itd *itd;
+
+ next_uframe = stream->next_uframe & (mod - 1);
+
+ if (unlikely (list_empty(&stream->td_list))) {
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
+ += stream->bandwidth;
+ fusbh200_dbg(fusbh200,
+ "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
+ urb->dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+ urb->interval,
+ next_uframe >> 3, next_uframe & 0x7);
+ }
+
+ /* fill iTDs uframe by uframe */
+ for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
+ if (itd == NULL) {
+ /* ASSERT: we have all necessary itds */
+ // BUG_ON (list_empty (&iso_sched->td_list));
+
+ /* ASSERT: no itds for this endpoint in this uframe */
+
+ itd = list_entry (iso_sched->td_list.next,
+ struct fusbh200_itd, itd_list);
+ list_move_tail (&itd->itd_list, &stream->td_list);
+ itd->stream = stream;
+ itd->urb = urb;
+ itd_init (fusbh200, stream, itd);
+ }
+
+ uframe = next_uframe & 0x07;
+ frame = next_uframe >> 3;
+
+ itd_patch(fusbh200, itd, iso_sched, packet, uframe);
+
+ next_uframe += stream->interval;
+ next_uframe &= mod - 1;
+ packet++;
+
+ /* link completed itds into the schedule */
+ if (((next_uframe >> 3) != frame)
+ || packet == urb->number_of_packets) {
+ itd_link(fusbh200, frame & (fusbh200->periodic_size - 1), itd);
+ itd = NULL;
+ }
+ }
+ stream->next_uframe = next_uframe;
+
+ /* don't need that schedule data any more */
+ iso_sched_free (stream, iso_sched);
+ urb->hcpriv = NULL;
+
+ ++fusbh200->isoc_count;
+ enable_periodic(fusbh200);
+}
+
+#define ISO_ERRS (FUSBH200_ISOC_BUF_ERR | FUSBH200_ISOC_BABBLE | FUSBH200_ISOC_XACTERR)
+
+/* Process and recycle a completed ITD. Return true iff its urb completed,
+ * and hence its completion callback probably added things to the hardware
+ * schedule.
+ *
+ * Note that we carefully avoid recycling this descriptor until after any
+ * completion callback runs, so that it won't be reused quickly. That is,
+ * assuming (a) no more than two urbs per frame on this endpoint, and also
+ * (b) only this endpoint's completions submit URBs. It seems some silicon
+ * corrupts things if you reuse completed descriptors very quickly...
+ */
+static bool itd_complete(struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
+{
+ struct urb *urb = itd->urb;
+ struct usb_iso_packet_descriptor *desc;
+ u32 t;
+ unsigned uframe;
+ int urb_index = -1;
+ struct fusbh200_iso_stream *stream = itd->stream;
+ struct usb_device *dev;
+ bool retval = false;
+
+ /* for each uframe with a packet */
+ for (uframe = 0; uframe < 8; uframe++) {
+ if (likely (itd->index[uframe] == -1))
+ continue;
+ urb_index = itd->index[uframe];
+ desc = &urb->iso_frame_desc [urb_index];
+
+ t = hc32_to_cpup(fusbh200, &itd->hw_transaction [uframe]);
+ itd->hw_transaction [uframe] = 0;
+
+ /* report transfer status */
+ if (unlikely (t & ISO_ERRS)) {
+ urb->error_count++;
+ if (t & FUSBH200_ISOC_BUF_ERR)
+ desc->status = usb_pipein (urb->pipe)
+ ? -ENOSR /* hc couldn't read */
+ : -ECOMM; /* hc couldn't write */
+ else if (t & FUSBH200_ISOC_BABBLE)
+ desc->status = -EOVERFLOW;
+ else /* (t & FUSBH200_ISOC_XACTERR) */
+ desc->status = -EPROTO;
+
+ /* HC need not update length with this error */
+ if (!(t & FUSBH200_ISOC_BABBLE)) {
+ desc->actual_length = fusbh200_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ }
+ } else if (likely ((t & FUSBH200_ISOC_ACTIVE) == 0)) {
+ desc->status = 0;
+ desc->actual_length = fusbh200_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ } else {
+ /* URB was too late */
+ desc->status = -EXDEV;
+ }
+ }
+
+ /* handle completion now? */
+ if (likely ((urb_index + 1) != urb->number_of_packets))
+ goto done;
+
+ /* ASSERT: it's really the last itd for this urb
+ list_for_each_entry (itd, &stream->td_list, itd_list)
+ BUG_ON (itd->urb == urb);
+ */
+
+ /* give urb back to the driver; completion often (re)submits */
+ dev = urb->dev;
+ fusbh200_urb_done(fusbh200, urb, 0);
+ retval = true;
+ urb = NULL;
+
+ --fusbh200->isoc_count;
+ disable_periodic(fusbh200);
+
+ if (unlikely(list_is_singular(&stream->td_list))) {
+ fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
+ -= stream->bandwidth;
+ fusbh200_dbg(fusbh200,
+ "deschedule devp %s ep%d%s-iso\n",
+ dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
+ }
+
+done:
+ itd->urb = NULL;
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&itd->itd_list, &stream->free_list);
+
+ /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &fusbh200->cached_itd_list);
+ start_free_itds(fusbh200);
+ }
+
+ return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int itd_submit (struct fusbh200_hcd *fusbh200, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int status = -EINVAL;
+ unsigned long flags;
+ struct fusbh200_iso_stream *stream;
+
+ /* Get iso_stream head */
+ stream = iso_stream_find (fusbh200, urb);
+ if (unlikely (stream == NULL)) {
+ fusbh200_dbg (fusbh200, "can't get iso stream\n");
+ return -ENOMEM;
+ }
+ if (unlikely (urb->interval != stream->interval &&
+ fusbh200_port_speed(fusbh200, 0) == USB_PORT_STAT_HIGH_SPEED)) {
+ fusbh200_dbg (fusbh200, "can't change iso interval %d --> %d\n",
+ stream->interval, urb->interval);
+ goto done;
+ }
+
+#ifdef FUSBH200_URB_TRACE
+ fusbh200_dbg (fusbh200,
+ "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint (urb->pipe),
+ usb_pipein (urb->pipe) ? "in" : "out",
+ urb->transfer_buffer_length,
+ urb->number_of_packets, urb->interval,
+ stream);
+#endif
+
+ /* allocate ITDs w/o locking anything */
+ status = itd_urb_transaction (stream, fusbh200, urb, mem_flags);
+ if (unlikely (status < 0)) {
+ fusbh200_dbg (fusbh200, "can't init itds\n");
+ goto done;
+ }
+
+ /* schedule ... need to lock */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+ status = iso_stream_schedule(fusbh200, urb, stream);
+ if (likely (status == 0))
+ itd_link_urb (fusbh200, urb, fusbh200->periodic_size << 3, stream);
+ else
+ usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+ done_not_linked:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ done:
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_isoc(struct fusbh200_hcd *fusbh200)
+{
+ unsigned uf, now_frame, frame;
+ unsigned fmask = fusbh200->periodic_size - 1;
+ bool modified, live;
+
+ /*
+ * When running, scan from last scan point up to "now"
+ * else clean up by scanning everything that's left.
+ * Touches as few pages as possible: cache-friendly.
+ */
+ if (fusbh200->rh_state >= FUSBH200_RH_RUNNING) {
+ uf = fusbh200_read_frame_index(fusbh200);
+ now_frame = (uf >> 3) & fmask;
+ live = true;
+ } else {
+ now_frame = (fusbh200->next_frame - 1) & fmask;
+ live = false;
+ }
+ fusbh200->now_frame = now_frame;
+
+ frame = fusbh200->next_frame;
+ for (;;) {
+ union fusbh200_shadow q, *q_p;
+ __hc32 type, *hw_p;
+
+restart:
+ /* scan each element in frame's queue for completions */
+ q_p = &fusbh200->pshadow [frame];
+ hw_p = &fusbh200->periodic [frame];
+ q.ptr = q_p->ptr;
+ type = Q_NEXT_TYPE(fusbh200, *hw_p);
+ modified = false;
+
+ while (q.ptr != NULL) {
+ switch (hc32_to_cpu(fusbh200, type)) {
+ case Q_TYPE_ITD:
+ /* If this ITD is still active, leave it for
+ * later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
+ */
+ if (frame == now_frame && live) {
+ rmb();
+ for (uf = 0; uf < 8; uf++) {
+ if (q.itd->hw_transaction[uf] &
+ ITD_ACTIVE(fusbh200))
+ break;
+ }
+ if (uf < 8) {
+ q_p = &q.itd->itd_next;
+ hw_p = &q.itd->hw_next;
+ type = Q_NEXT_TYPE(fusbh200,
+ q.itd->hw_next);
+ q = *q_p;
+ break;
+ }
+ }
+
+ /* Take finished ITDs out of the schedule
+ * and process them: recycle, maybe report
+ * URB completion. HC won't cache the
+ * pointer for much longer, if at all.
+ */
+ *q_p = q.itd->itd_next;
+ *hw_p = q.itd->hw_next;
+ type = Q_NEXT_TYPE(fusbh200, q.itd->hw_next);
+ wmb();
+ modified = itd_complete (fusbh200, q.itd);
+ q = *q_p;
+ break;
+ default:
+ fusbh200_dbg(fusbh200, "corrupt type %d frame %d shadow %p\n",
+ type, frame, q.ptr);
+ // BUG ();
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
+ q.ptr = NULL;
+ break;
+ }
+
+ /* assume completion callbacks modify the queue */
+ if (unlikely(modified && fusbh200->isoc_count > 0))
+ goto restart;
+ }
+
+ /* Stop when we have reached the current frame */
+ if (frame == now_frame)
+ break;
+ frame = (frame + 1) & fmask;
+ }
+ fusbh200->next_frame = now_frame;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * Display / Set uframe_periodic_max
+ */
+static ssize_t show_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fusbh200_hcd *fusbh200;
+ int n;
+
+ fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
+ n = scnprintf(buf, PAGE_SIZE, "%d\n", fusbh200->uframe_periodic_max);
+ return n;
+}
+
+
+static ssize_t store_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fusbh200_hcd *fusbh200;
+ unsigned uframe_periodic_max;
+ unsigned frame, uframe;
+ unsigned short allocated_max;
+ unsigned long flags;
+ ssize_t ret;
+
+ fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
+ if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ return -EINVAL;
+
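+	/*
+	 * A uframe is 125 usec; the USB 2.0 default cap for periodic
+	 * transfers is 80% of that (100 usec), so only values in the
+	 * range [100, 125) are accepted (100 usec == 100*100/125 == 80%).
+	 */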
+ if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+ fusbh200_info(fusbh200, "rejecting invalid request for "
+ "uframe_periodic_max=%u\n", uframe_periodic_max);
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+
+ /*
+ * lock, so that our checking does not race with possible periodic
+ * bandwidth allocation through submitting new urbs.
+ */
+ spin_lock_irqsave (&fusbh200->lock, flags);
+
+ /*
+ * for request to decrease max periodic bandwidth, we have to check
+ * every microframe in the schedule to see whether the decrease is
+ * possible.
+ */
+ if (uframe_periodic_max < fusbh200->uframe_periodic_max) {
+ allocated_max = 0;
+
+ for (frame = 0; frame < fusbh200->periodic_size; ++frame)
+ for (uframe = 0; uframe < 7; ++uframe)
+ allocated_max = max(allocated_max,
+ periodic_usecs (fusbh200, frame, uframe));
+
+ if (allocated_max > uframe_periodic_max) {
+ fusbh200_info(fusbh200,
+				"cannot decrease uframe_periodic_max because "
+ "periodic bandwidth is already allocated "
+ "(%u > %u)\n",
+ allocated_max, uframe_periodic_max);
+ goto out_unlock;
+ }
+ }
+
+ /* increasing is always ok */
+
+ fusbh200_info(fusbh200, "setting max periodic bandwidth to %u%% "
+ "(== %u usec/uframe)\n",
+ 100*uframe_periodic_max/125, uframe_periodic_max);
+
+ if (uframe_periodic_max != 100)
+ fusbh200_warn(fusbh200, "max periodic bandwidth set is non-standard\n");
+
+ fusbh200->uframe_periodic_max = uframe_periodic_max;
+ ret = count;
+
+out_unlock:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return ret;
+}
+static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max);
+
+
+static inline int create_sysfs_files(struct fusbh200_hcd *fusbh200)
+{
+ struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller;
+ int i = 0;
+
+ if (i)
+ goto out;
+
+ i = device_create_file(controller, &dev_attr_uframe_periodic_max);
+out:
+ return i;
+}
+
+static inline void remove_sysfs_files(struct fusbh200_hcd *fusbh200)
+{
+ struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller;
+
+ device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
+/*-------------------------------------------------------------------------*/
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void fusbh200_turn_off_all_ports(struct fusbh200_hcd *fusbh200)
+{
+ u32 __iomem *status_reg = &fusbh200->regs->port_status;
+
+ fusbh200_writel(fusbh200, PORT_RWC_BITS, status_reg);
+}
+
+/*
+ * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fusbh200_silence_controller(struct fusbh200_hcd *fusbh200)
+{
+ fusbh200_halt(fusbh200);
+
+ spin_lock_irq(&fusbh200->lock);
+ fusbh200->rh_state = FUSBH200_RH_HALTED;
+ fusbh200_turn_off_all_ports(fusbh200);
+ spin_unlock_irq(&fusbh200->lock);
+}
+
+/* fusbh200_shutdown kicks in for silicon on any bus (not just PCI, etc).
+ * This forcibly disables dma and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void fusbh200_shutdown(struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+
+ spin_lock_irq(&fusbh200->lock);
+ fusbh200->shutdown = true;
+ fusbh200->rh_state = FUSBH200_RH_STOPPING;
+ fusbh200->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fusbh200->lock);
+
+ fusbh200_silence_controller(fusbh200);
+
+ hrtimer_cancel(&fusbh200->hrtimer);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * fusbh200_work is called from some interrupts, timers, and so on.
+ * it calls driver completion functions, after dropping fusbh200->lock.
+ */
+static void fusbh200_work (struct fusbh200_hcd *fusbh200)
+{
+ /* another CPU may drop fusbh200->lock during a schedule scan while
+ * it reports urb completions. this flag guards against bogus
+ * attempts at re-entrant schedule scanning.
+ */
+ if (fusbh200->scanning) {
+ fusbh200->need_rescan = true;
+ return;
+ }
+ fusbh200->scanning = true;
+
+ rescan:
+ fusbh200->need_rescan = false;
+ if (fusbh200->async_count)
+ scan_async(fusbh200);
+ if (fusbh200->intr_count > 0)
+ scan_intr(fusbh200);
+ if (fusbh200->isoc_count > 0)
+ scan_isoc(fusbh200);
+ if (fusbh200->need_rescan)
+ goto rescan;
+ fusbh200->scanning = false;
+
+ /* the IO watchdog guards against hardware or driver bugs that
+ * misplace IRQs, and should let us run completely without IRQs.
+ * such lossage has been observed on both VT6202 and VT8235.
+ */
+ turn_on_io_watchdog(fusbh200);
+}
+
+/*
+ * Called when the fusbh200_hcd module is removed.
+ */
+static void fusbh200_stop (struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+
+ fusbh200_dbg (fusbh200, "stop\n");
+
+ /* no more interrupts ... */
+
+ spin_lock_irq(&fusbh200->lock);
+ fusbh200->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fusbh200->lock);
+
+ fusbh200_quiesce(fusbh200);
+ fusbh200_silence_controller(fusbh200);
+ fusbh200_reset (fusbh200);
+
+ hrtimer_cancel(&fusbh200->hrtimer);
+ remove_sysfs_files(fusbh200);
+ remove_debug_files (fusbh200);
+
+ /* root hub is shut down separately (first, when possible) */
+ spin_lock_irq (&fusbh200->lock);
+ end_free_itds(fusbh200);
+ spin_unlock_irq (&fusbh200->lock);
+ fusbh200_mem_cleanup (fusbh200);
+
+ fusbh200_dbg(fusbh200, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
+ fusbh200->stats.lost_iaa);
+ fusbh200_dbg (fusbh200, "complete %ld unlink %ld\n",
+ fusbh200->stats.complete, fusbh200->stats.unlink);
+
+ dbg_status (fusbh200, "fusbh200_stop completed",
+ fusbh200_readl(fusbh200, &fusbh200->regs->status));
+}
+
+/* one-time init, only for memory state */
+static int hcd_fusbh200_init(struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+ u32 temp;
+ int retval;
+ u32 hcc_params;
+ struct fusbh200_qh_hw *hw;
+
+ spin_lock_init(&fusbh200->lock);
+
+ /*
+	 * keep the IO watchdog enabled by default; good HCDs can turn it off later
+ */
+ fusbh200->need_io_watchdog = 1;
+
+ hrtimer_init(&fusbh200->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ fusbh200->hrtimer.function = fusbh200_hrtimer_func;
+ fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
+
+ hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+
+ /*
+ * by default set standard 80% (== 100 usec/uframe) max periodic
+ * bandwidth as required by USB 2.0
+ */
+ fusbh200->uframe_periodic_max = 100;
+
+ /*
+ * hw default: 1K periodic list heads, one per frame.
+ * periodic_size can shrink by USBCMD update if hcc_params allows.
+ */
+ fusbh200->periodic_size = DEFAULT_I_TDPS;
+ INIT_LIST_HEAD(&fusbh200->intr_qh_list);
+ INIT_LIST_HEAD(&fusbh200->cached_itd_list);
+
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ switch (FUSBH200_TUNE_FLS) {
+ case 0: fusbh200->periodic_size = 1024; break;
+ case 1: fusbh200->periodic_size = 512; break;
+ case 2: fusbh200->periodic_size = 256; break;
+ default: BUG();
+ }
+ }
+ if ((retval = fusbh200_mem_init(fusbh200, GFP_KERNEL)) < 0)
+ return retval;
+
+ /* controllers may cache some of the periodic schedule ... */
+ fusbh200->i_thresh = 2;
+
+ /*
+ * dedicate a qh for the async ring head, since we couldn't unlink
+ * a 'real' qh without stopping the async schedule [4.8]. use it
+ * as the 'reclamation list head' too.
+ * its dummy is used in hw_alt_next of many tds, to prevent the qh
+ * from automatically advancing to the next td after short reads.
+ */
+ fusbh200->async->qh_next.qh = NULL;
+ hw = fusbh200->async->hw;
+ hw->hw_next = QH_NEXT(fusbh200, fusbh200->async->qh_dma);
+ hw->hw_info1 = cpu_to_hc32(fusbh200, QH_HEAD);
+ hw->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
+ hw->hw_qtd_next = FUSBH200_LIST_END(fusbh200);
+ fusbh200->async->qh_state = QH_STATE_LINKED;
+ hw->hw_alt_next = QTD_NEXT(fusbh200, fusbh200->async->dummy->qtd_dma);
+
+ /* clear interrupt enables, set irq latency */
+ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+ log2_irq_thresh = 0;
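+	/* USBCMD bits 23:16 hold the interrupt threshold, in microframes */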
+ temp = 1 << (16 + log2_irq_thresh);
+ if (HCC_CANPARK(hcc_params)) {
+ /* HW default park == 3, on hardware that supports it (like
+ * NVidia and ALI silicon), maximizes throughput on the async
+ * schedule by avoiding QH fetches between transfers.
+ *
+ * With fast usb storage devices and NForce2, "park" seems to
+ * make problems: throughput reduction (!), data errors...
+ */
+ if (park) {
+ park = min(park, (unsigned) 3);
+ temp |= CMD_PARK;
+ temp |= park << 8;
+ }
+ fusbh200_dbg(fusbh200, "park %d\n", park);
+ }
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ temp &= ~(3 << 2);
+ temp |= (FUSBH200_TUNE_FLS << 2);
+ }
+ fusbh200->command = temp;
+
+ /* Accept arbitrarily long scatter-gather lists */
+ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+ hcd->self.sg_tablesize = ~0;
+ return 0;
+}
+
+/* start HC running; it's halted, hcd_fusbh200_init() has been run (once) */
+static int fusbh200_run (struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ u32 temp;
+ u32 hcc_params;
+
+ hcd->uses_new_polling = 1;
+
+ /* EHCI spec section 4.1 */
+
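+	/* program the periodic frame list base and the async list head */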
+ fusbh200_writel(fusbh200, fusbh200->periodic_dma, &fusbh200->regs->frame_list);
+ fusbh200_writel(fusbh200, (u32)fusbh200->async->qh_dma, &fusbh200->regs->async_next);
+
+ /*
+ * hcc_params controls whether fusbh200->regs->segment must (!!!)
+ * be used; it constrains QH/ITD/SITD and QTD locations.
+ * pci_pool consistent memory always uses segment zero.
+ * streaming mappings for I/O buffers, like pci_map_single(),
+ * can return segments above 4GB, if the device allows.
+ *
+ * NOTE: the dma mask is visible through dma_supported(), so
+ * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+ * Scsi_Host.highmem_io, and so forth. It's readonly to all
+ * host side drivers though.
+ */
+ hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+
+ // Philips, Intel, and maybe others need CMD_RUN before the
+ // root hub will detect new devices (why?); NEC doesn't
+ fusbh200->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+ fusbh200->command |= CMD_RUN;
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+ dbg_cmd (fusbh200, "init", fusbh200->command);
+
+ /*
+ * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+ * are explicitly handed to companion controller(s), so no TT is
+ * involved with the root hub. (Except where one is integrated,
+ * and there's no companion controller unless maybe for USB OTG.)
+ *
+ * Turning on the CF flag will transfer ownership of all ports
+ * from the companions to the EHCI controller. If any of the
+ * companions are in the middle of a port reset at the time, it
+ * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
+ * guarantees that no resets are in progress. After we set CF,
+ * a short delay lets the hardware catch up; new resets shouldn't
+ * be started before the port switching actions could complete.
+ */
+ down_write(&ehci_cf_port_reset_rwsem);
+ fusbh200->rh_state = FUSBH200_RH_RUNNING;
+ fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */
+ msleep(5);
+ up_write(&ehci_cf_port_reset_rwsem);
+ fusbh200->last_periodic_enable = ktime_get_real();
+
+ temp = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
+ fusbh200_info (fusbh200,
+ "USB %x.%x started, EHCI %x.%02x\n",
+ ((fusbh200->sbrn & 0xf0)>>4), (fusbh200->sbrn & 0x0f),
+ temp >> 8, temp & 0xff);
+
+ fusbh200_writel(fusbh200, INTR_MASK,
+ &fusbh200->regs->intr_enable); /* Turn On Interrupts */
+
+ /* GRR this is run-once init(), being done every time the HC starts.
+	 * So long as they're part of class devices, we can't do it in init()
+ * since the class device isn't created that early.
+ */
+ create_debug_files(fusbh200);
+ create_sysfs_files(fusbh200);
+
+ return 0;
+}
+
+static int fusbh200_setup(struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+ int retval;
+
+ fusbh200->regs = (void __iomem *)fusbh200->caps +
+ HC_LENGTH(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
+ dbg_hcs_params(fusbh200, "reset");
+ dbg_hcc_params(fusbh200, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ fusbh200->hcs_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
+
+ fusbh200->sbrn = HCD_USB2;
+
+ /* data structure init */
+ retval = hcd_fusbh200_init(hcd);
+ if (retval)
+ return retval;
+
+ retval = fusbh200_halt(fusbh200);
+ if (retval)
+ return retval;
+
+ fusbh200_reset(fusbh200);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static irqreturn_t fusbh200_irq (struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ u32 status, masked_status, pcd_status = 0, cmd;
+ int bh;
+
+ spin_lock (&fusbh200->lock);
+
+ status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
+
+ /* e.g. cardbus physical eject */
+ if (status == ~(u32) 0) {
+ fusbh200_dbg (fusbh200, "device removed\n");
+ goto dead;
+ }
+
+ /*
+ * We don't use STS_FLR, but some controllers don't like it to
+ * remain on, so mask it out along with the other status bits.
+ */
+ masked_status = status & (INTR_MASK | STS_FLR);
+
+ /* Shared IRQ? */
+ if (!masked_status || unlikely(fusbh200->rh_state == FUSBH200_RH_HALTED)) {
+ spin_unlock(&fusbh200->lock);
+ return IRQ_NONE;
+ }
+
+ /* clear (just) interrupts */
+ fusbh200_writel(fusbh200, masked_status, &fusbh200->regs->status);
+ cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+ bh = 0;
+
+ /* normal [4.15.1.2] or error [4.15.1.1] completion */
+ if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
+ if (likely ((status & STS_ERR) == 0))
+ COUNT (fusbh200->stats.normal);
+ else
+ COUNT (fusbh200->stats.error);
+ bh = 1;
+ }
+
+ /* complete the unlinking of some qh [4.15.2.3] */
+ if (status & STS_IAA) {
+
+ /* Turn off the IAA watchdog */
+ fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_IAA_WATCHDOG);
+
+ /*
+ * Mild optimization: Allow another IAAD to reset the
+ * hrtimer, if one occurs before the next expiration.
+ * In theory we could always cancel the hrtimer, but
+ * tests show that about half the time it will be reset
+ * for some other event anyway.
+ */
+ if (fusbh200->next_hrtimer_event == FUSBH200_HRTIMER_IAA_WATCHDOG)
+ ++fusbh200->next_hrtimer_event;
+
+ /* guard against (alleged) silicon errata */
+ if (cmd & CMD_IAAD)
+ fusbh200_dbg(fusbh200, "IAA with IAAD still set?\n");
+ if (fusbh200->async_iaa) {
+ COUNT(fusbh200->stats.iaa);
+ end_unlink_async(fusbh200);
+ } else
+ fusbh200_dbg(fusbh200, "IAA with nothing unlinked?\n");
+ }
+
+ /* remote wakeup [4.3.1] */
+ if (status & STS_PCD) {
+ int pstatus;
+ u32 __iomem *status_reg = &fusbh200->regs->port_status;
+
+ /* kick root hub later */
+ pcd_status = status;
+
+ /* resume root hub? */
+ if (fusbh200->rh_state == FUSBH200_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+
+ pstatus = fusbh200_readl(fusbh200, status_reg);
+
+ if (test_bit(0, &fusbh200->suspended_ports) &&
+ ((pstatus & PORT_RESUME) ||
+ !(pstatus & PORT_SUSPEND)) &&
+ (pstatus & PORT_PE) &&
+ fusbh200->reset_done[0] == 0) {
+
+ /* start 20 msec resume signaling from this port,
+ * and make khubd collect PORT_STAT_C_SUSPEND to
+ * stop that signaling. Use 5 ms extra for safety,
+ * like usb_port_resume() does.
+ */
+ fusbh200->reset_done[0] = jiffies + msecs_to_jiffies(25);
+ set_bit(0, &fusbh200->resuming_ports);
+ fusbh200_dbg (fusbh200, "port 1 remote wakeup\n");
+ mod_timer(&hcd->rh_timer, fusbh200->reset_done[0]);
+ }
+ }
+
+ /* PCI errors [4.15.2.4] */
+ if (unlikely ((status & STS_FATAL) != 0)) {
+ fusbh200_err(fusbh200, "fatal error\n");
+ dbg_cmd(fusbh200, "fatal", cmd);
+ dbg_status(fusbh200, "fatal", status);
+dead:
+ usb_hc_died(hcd);
+
+ /* Don't let the controller do anything more */
+ fusbh200->shutdown = true;
+ fusbh200->rh_state = FUSBH200_RH_STOPPING;
+ fusbh200->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+ fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+ fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
+ fusbh200_handle_controller_death(fusbh200);
+
+ /* Handle completions when the controller stops */
+ bh = 0;
+ }
+
+ if (bh)
+ fusbh200_work (fusbh200);
+ spin_unlock (&fusbh200->lock);
+ if (pcd_status)
+ usb_hcd_poll_rh_status(hcd);
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE: control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int fusbh200_urb_enqueue (
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags
+) {
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ struct list_head qtd_list;
+
+ INIT_LIST_HEAD (&qtd_list);
+
+ switch (usb_pipetype (urb->pipe)) {
+ case PIPE_CONTROL:
+ /* qh_completions() code doesn't handle all the fault cases
+ * in multi-TD control transfers. Even 1KB is rare anyway.
+ */
+ if (urb->transfer_buffer_length > (16 * 1024))
+ return -EMSGSIZE;
+ /* FALLTHROUGH */
+ /* case PIPE_BULK: */
+ default:
+ if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return submit_async(fusbh200, urb, &qtd_list, mem_flags);
+
+ case PIPE_INTERRUPT:
+ if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return intr_submit(fusbh200, urb, &qtd_list, mem_flags);
+
+ case PIPE_ISOCHRONOUS:
+ return itd_submit (fusbh200, urb, mem_flags);
+ }
+}
+
+/* remove from hardware lists
+ * completions normally happen asynchronously
+ */
+
+static int fusbh200_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ struct fusbh200_qh *qh;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
+ switch (usb_pipetype (urb->pipe)) {
+ // case PIPE_CONTROL:
+ // case PIPE_BULK:
+ default:
+ qh = (struct fusbh200_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_async(fusbh200, qh);
+ break;
+ case QH_STATE_UNLINK:
+ case QH_STATE_UNLINK_WAIT:
+ /* already started */
+ break;
+ case QH_STATE_IDLE:
+ /* QH might be waiting for a Clear-TT-Buffer */
+ qh_completions(fusbh200, qh);
+ break;
+ }
+ break;
+
+ case PIPE_INTERRUPT:
+ qh = (struct fusbh200_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_intr(fusbh200, qh);
+ break;
+ case QH_STATE_IDLE:
+ qh_completions (fusbh200, qh);
+ break;
+ default:
+ fusbh200_dbg (fusbh200, "bogus qh %p state %d\n",
+ qh, qh->qh_state);
+ goto done;
+ }
+ break;
+
+ case PIPE_ISOCHRONOUS:
+ // itd...
+
+ // wait till next completion, do it then.
+ // completion irqs can wait up to 1024 msec,
+ break;
+ }
+done:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+// bulk qh holds the data toggle
+
+static void
+fusbh200_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
+ unsigned long flags;
+ struct fusbh200_qh *qh, *tmp;
+
+ /* ASSERT: any requests/urbs are being unlinked */
+ /* ASSERT: nobody can be submitting urbs for this any more */
+
+rescan:
+ spin_lock_irqsave (&fusbh200->lock, flags);
+ qh = ep->hcpriv;
+ if (!qh)
+ goto done;
+
+ /* endpoints can be iso streams. for now, we don't
+ * accelerate iso completions ... so spin a while.
+ */
+ if (qh->hw == NULL) {
+ struct fusbh200_iso_stream *stream = ep->hcpriv;
+
+ if (!list_empty(&stream->td_list))
+ goto idle_timeout;
+
+ /* BUG_ON(!list_empty(&stream->free_list)); */
+ kfree(stream);
+ goto done;
+ }
+
+ if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+ qh->qh_state = QH_STATE_IDLE;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ for (tmp = fusbh200->async->qh_next.qh;
+ tmp && tmp != qh;
+ tmp = tmp->qh_next.qh)
+ continue;
+ /* periodic qh self-unlinks on empty, and a COMPLETING qh
+ * may already be unlinked.
+ */
+ if (tmp)
+ start_unlink_async(fusbh200, qh);
+ /* FALL THROUGH */
+ case QH_STATE_UNLINK: /* wait for hw to finish? */
+ case QH_STATE_UNLINK_WAIT:
+idle_timeout:
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ case QH_STATE_IDLE: /* fully unlinked */
+ if (qh->clearing_tt)
+ goto idle_timeout;
+ if (list_empty (&qh->qtd_list)) {
+ qh_destroy(fusbh200, qh);
+ break;
+ }
+ /* else FALL THROUGH */
+ default:
+ /* caller was supposed to have unlinked any requests;
+ * that's not our job. just leak this memory.
+ */
+ fusbh200_err (fusbh200, "qh %p (#%02x) state %d%s\n",
+ qh, ep->desc.bEndpointAddress, qh->qh_state,
+ list_empty (&qh->qtd_list) ? "" : "(has tds)");
+ break;
+ }
+ done:
+ ep->hcpriv = NULL;
+ spin_unlock_irqrestore (&fusbh200->lock, flags);
+}
+
+static void
+fusbh200_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+ struct fusbh200_qh *qh;
+ int eptype = usb_endpoint_type(&ep->desc);
+ int epnum = usb_endpoint_num(&ep->desc);
+ int is_out = usb_endpoint_dir_out(&ep->desc);
+ unsigned long flags;
+
+ if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+ return;
+
+ spin_lock_irqsave(&fusbh200->lock, flags);
+ qh = ep->hcpriv;
+
+ /* For Bulk and Interrupt endpoints we maintain the toggle state
+ * in the hardware; the toggle bits in udev aren't used at all.
+ * When an endpoint is reset by usb_clear_halt() we must reset
+ * the toggle bit in the QH.
+ */
+ if (qh) {
+ usb_settoggle(qh->dev, epnum, is_out, 0);
+ if (!list_empty(&qh->qtd_list)) {
+ WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+ } else if (qh->qh_state == QH_STATE_LINKED ||
+ qh->qh_state == QH_STATE_COMPLETING) {
+
+ /* The toggle value in the QH can't be updated
+ * while the QH is active. Unlink it now;
+ * re-linking will call qh_refresh().
+ */
+ if (eptype == USB_ENDPOINT_XFER_BULK)
+ start_unlink_async(fusbh200, qh);
+ else
+ start_unlink_intr(fusbh200, qh);
+ }
+ }
+ spin_unlock_irqrestore(&fusbh200->lock, flags);
+}
+
+static int fusbh200_get_frame (struct usb_hcd *hcd)
+{
+ struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
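+	/* FRINDEX counts microframes; convert to a frame number */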
+ return (fusbh200_read_frame_index(fusbh200) >> 3) % fusbh200->periodic_size;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The EHCI in ChipIdea HDRC cannot be a separate module or device,
+ * because its registers (and irq) are shared between host/gadget/otg
+ * functions and in order to facilitate role switching we cannot
+ * give the fusbh200 driver exclusive access to those.
+ */
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR (DRIVER_AUTHOR);
+MODULE_LICENSE ("GPL");
+
+static const struct hc_driver fusbh200_fusbh200_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Faraday USB2.0 Host Controller",
+ .hcd_priv_size = sizeof(struct fusbh200_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = fusbh200_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = hcd_fusbh200_init,
+ .start = fusbh200_run,
+ .stop = fusbh200_stop,
+ .shutdown = fusbh200_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = fusbh200_urb_enqueue,
+ .urb_dequeue = fusbh200_urb_dequeue,
+ .endpoint_disable = fusbh200_endpoint_disable,
+ .endpoint_reset = fusbh200_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = fusbh200_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = fusbh200_hub_status_data,
+ .hub_control = fusbh200_hub_control,
+ .bus_suspend = fusbh200_bus_suspend,
+ .bus_resume = fusbh200_bus_resume,
+
+ .relinquish_port = fusbh200_relinquish_port,
+ .port_handed_over = fusbh200_port_handed_over,
+
+ .clear_tt_buffer_complete = fusbh200_clear_tt_buffer_complete,
+};
+
+static void fusbh200_init(struct fusbh200_hcd *fusbh200)
+{
+ u32 reg;
+
+ reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmcsr);
+ reg |= BMCSR_INT_POLARITY;
+ reg &= ~BMCSR_VBUS_OFF;
+ fusbh200_writel(fusbh200, reg, &fusbh200->regs->bmcsr);
+
+ reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmier);
+ fusbh200_writel(fusbh200, reg | BMIER_OVC_EN | BMIER_VBUS_ERR_EN,
+ &fusbh200->regs->bmier);
+}
+
+/**
+ * fusbh200_hcd_probe - initialize Faraday FUSBH200 HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int fusbh200_hcd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq;
+ int retval = -ENODEV;
+ struct fusbh200_hcd *fusbh200;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pdev->dev.power.power_state = PMSG_ON;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(dev));
+ return -ENODEV;
+ }
+
+ irq = res->start;
+
+ hcd = usb_create_hcd(&fusbh200_fusbh200_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "failed to create hcd with err %d\n", retval);
+ retval = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->has_tt = 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ fusbh200_fusbh200_hc_driver.description)) {
+ dev_dbg(dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto fail_request_resource;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+
+ hcd->regs = ioremap_nocache(res->start, resource_size(res));
+ if (hcd->regs == NULL) {
+ dev_dbg(dev, "error mapping memory\n");
+ retval = -EFAULT;
+ goto fail_ioremap;
+ }
+
+ fusbh200 = hcd_to_fusbh200(hcd);
+
+ fusbh200->caps = hcd->regs;
+
+ retval = fusbh200_setup(hcd);
+ if (retval)
+ goto fail_add_hcd;
+
+ fusbh200_init(fusbh200);
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(dev, "failed to add hcd with err %d\n", retval);
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ return retval;
+
+fail_add_hcd:
+ iounmap(hcd->regs);
+fail_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
+ return retval;
+}
+
+/**
+ * fusbh200_hcd_remove - shutdown processing for EHCI HCDs
+ * @dev: USB Host Controller being removed
+ *
+ * Reverses the effect of fusbh200_hcd_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int fusbh200_hcd_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ if (!hcd)
+ return 0;
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct platform_driver fusbh200_hcd_fusbh200_driver = {
+ .driver = {
+ .name = "fusbh200",
+ },
+ .probe = fusbh200_hcd_probe,
+ .remove = fusbh200_hcd_remove,
+};
+
+static int __init fusbh200_hcd_init(void)
+{
+ int retval = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
+ set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
+ test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
+ printk(KERN_WARNING "Warning! fusbh200_hcd should always be loaded"
+ " before uhci_hcd and ohci_hcd, not after\n");
+
+ pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n",
+ hcd_name,
+ sizeof(struct fusbh200_qh), sizeof(struct fusbh200_qtd),
+ sizeof(struct fusbh200_itd));
+
+ fusbh200_debug_root = debugfs_create_dir("fusbh200", usb_debug_root);
+ if (!fusbh200_debug_root) {
+ retval = -ENOENT;
+ goto err_debug;
+ }
+
+ retval = platform_driver_register(&fusbh200_hcd_fusbh200_driver);
+ if (retval < 0)
+ goto clean;
+ return retval;
+
+ platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
+clean:
+ debugfs_remove(fusbh200_debug_root);
+ fusbh200_debug_root = NULL;
+err_debug:
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ return retval;
+}
+module_init(fusbh200_hcd_init);
+
+static void __exit fusbh200_hcd_cleanup(void)
+{
+ platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
+ debugfs_remove(fusbh200_debug_root);
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(fusbh200_hcd_cleanup);
diff --git a/drivers/usb/host/fusbh200.h b/drivers/usb/host/fusbh200.h
new file mode 100644
index 00000000000..6b719e066c3
--- /dev/null
+++ b/drivers/usb/host/fusbh200.h
@@ -0,0 +1,731 @@
+#ifndef __LINUX_FUSBH200_H
+#define __LINUX_FUSBH200_H
+
+/* definitions used for the EHCI driver */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given FUSBH200_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#define __hc32 __le32
+#define __hc16 __le16
+
+/* statistics can be kept for tuning/monitoring */
+struct fusbh200_stats {
+ /* irq usage */
+ unsigned long normal;
+ unsigned long error;
+ unsigned long iaa;
+ unsigned long lost_iaa;
+
+ /* termination of urbs from core */
+ unsigned long complete;
+ unsigned long unlink;
+};
+
+/* fusbh200_hcd->lock guards shared data against other CPUs:
+ * fusbh200_hcd: async, unlink, periodic (and shadow), ...
+ * usb_host_endpoint: hcpriv
+ * fusbh200_qh: qh_next, qtd_list
+ * fusbh200_qtd: qtd_list
+ *
+ * Also, hold this lock when talking to HC registers or
+ * when updating hw_* fields in shared qh/qtd/... structures.
+ */
+
+#define FUSBH200_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */
+
+/*
+ * fusbh200_rh_state values of FUSBH200_RH_RUNNING or above mean that the
+ * controller may be doing DMA. Lower values mean there's no DMA.
+ */
+enum fusbh200_rh_state {
+ FUSBH200_RH_HALTED,
+ FUSBH200_RH_SUSPENDED,
+ FUSBH200_RH_RUNNING,
+ FUSBH200_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * ehci-timer.c) in parallel with this list.
+ */
+enum fusbh200_hrtimer_event {
+ FUSBH200_HRTIMER_POLL_ASS, /* Poll for async schedule off */
+ FUSBH200_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
+ FUSBH200_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
+ FUSBH200_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
+ FUSBH200_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ FUSBH200_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
+ FUSBH200_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
+ FUSBH200_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
+ FUSBH200_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
+ FUSBH200_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
+ FUSBH200_HRTIMER_NUM_EVENTS /* Must come last */
+};
+#define FUSBH200_HRTIMER_NO_EVENT 99
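+/* used for next_hrtimer_event when no event is currently scheduled */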
+
+struct fusbh200_hcd { /* one per controller */
+ /* timing support */
+ enum fusbh200_hrtimer_event next_hrtimer_event;
+ unsigned enabled_hrtimer_events;
+ ktime_t hr_timeouts[FUSBH200_HRTIMER_NUM_EVENTS];
+ struct hrtimer hrtimer;
+
+ int PSS_poll_count;
+ int ASS_poll_count;
+ int died_poll_count;
+
+ /* glue to PCI and HCD framework */
+ struct fusbh200_caps __iomem *caps;
+ struct fusbh200_regs __iomem *regs;
+ struct fusbh200_dbg_port __iomem *debug;
+
+ __u32 hcs_params; /* cached register copy */
+ spinlock_t lock;
+ enum fusbh200_rh_state rh_state;
+
+ /* general schedule support */
+ bool scanning:1;
+ bool need_rescan:1;
+ bool intr_unlinking:1;
+ bool async_unlinking:1;
+ bool shutdown:1;
+ struct fusbh200_qh *qh_scan_next;
+
+ /* async schedule support */
+ struct fusbh200_qh *async;
+ struct fusbh200_qh *dummy; /* For AMD quirk use */
+ struct fusbh200_qh *async_unlink;
+ struct fusbh200_qh *async_unlink_last;
+ struct fusbh200_qh *async_iaa;
+ unsigned async_unlink_cycle;
+ unsigned async_count; /* async activity count */
+
+ /* periodic schedule support */
+#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
+ unsigned periodic_size;
+ __hc32 *periodic; /* hw periodic table */
+ dma_addr_t periodic_dma;
+ struct list_head intr_qh_list;
+ unsigned i_thresh; /* uframes HC might cache */
+
+ union fusbh200_shadow *pshadow; /* mirror hw periodic table */
+ struct fusbh200_qh *intr_unlink;
+ struct fusbh200_qh *intr_unlink_last;
+ unsigned intr_unlink_cycle;
+ unsigned now_frame; /* frame from HC hardware */
+ unsigned next_frame; /* scan periodic, start here */
+ unsigned intr_count; /* intr activity count */
+ unsigned isoc_count; /* isoc activity count */
+ unsigned periodic_count; /* periodic activity count */
+ unsigned uframe_periodic_max; /* max periodic time per uframe */
+
+
+ /* list of itds completed while now_frame was still active */
+ struct list_head cached_itd_list;
+ struct fusbh200_itd *last_itd_to_free;
+
+ /* per root hub port */
+ unsigned long reset_done [FUSBH200_MAX_ROOT_PORTS];
+
+ /* bit vectors (one bit per port) */
+ unsigned long bus_suspended; /* which ports were
+ already suspended at the start of a bus suspend */
+ unsigned long companion_ports; /* which ports are
+ dedicated to the companion controller */
+ unsigned long owned_ports; /* which ports are
+ owned by the companion during a bus suspend */
+ unsigned long port_c_suspend; /* which ports have
+ the change-suspend feature turned on */
+ unsigned long suspended_ports; /* which ports are
+ suspended */
+ unsigned long resuming_ports; /* which ports have
+ started to resume */
+
+ /* per-HC memory pools (could be per-bus, but ...) */
+ struct dma_pool *qh_pool; /* qh per active urb */
+ struct dma_pool *qtd_pool; /* one or more per qh */
+ struct dma_pool *itd_pool; /* itd per iso urb */
+
+ unsigned random_frame;
+ unsigned long next_statechange;
+ ktime_t last_periodic_enable;
+ u32 command;
+
+ /* SILICON QUIRKS */
+ unsigned need_io_watchdog:1;
+ unsigned fs_i_thresh:1; /* Intel iso scheduling */
+
+ u8 sbrn; /* packed release number */
+
+ /* irq statistics */
+ struct fusbh200_stats stats;
+# define COUNT(x) do { (x)++; } while (0)
+
+ /* debug files */
+ struct dentry *debug_dir;
+};
+
+/* convert between an HCD pointer and the corresponding FUSBH200_HCD */
+static inline struct fusbh200_hcd *hcd_to_fusbh200 (struct usb_hcd *hcd)
+{
+ return (struct fusbh200_hcd *) (hcd->hcd_priv);
+}
+static inline struct usb_hcd *fusbh200_to_hcd (struct fusbh200_hcd *fusbh200)
+{
+ return container_of ((void *) fusbh200, struct usb_hcd, hcd_priv);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct fusbh200_caps {
+ /* these fields are specified as 8 and 16 bit registers,
+ * but some hosts can't perform 8 or 16 bit PCI accesses.
+ * some hosts treat caplength and hciversion as parts of a 32-bit
+ * register, others treat them as two separate registers, this
+ * affects the memory map for big endian controllers.
+ */
+ u32 hc_capbase;
+#define HC_LENGTH(fusbh200, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
+ (fusbh200_big_endian_capbase(fusbh200) ? 24 : 0)))
+#define HC_VERSION(fusbh200, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
+ (fusbh200_big_endian_capbase(fusbh200) ? 0 : 16)))
+ u32 hcs_params; /* HCSPARAMS - offset 0x4 */
+#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
+#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
+ u8 portroute[8]; /* nibbles for routing - offset 0xC */
+};
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct fusbh200_regs {
+
+ /* USBCMD: offset 0x00 */
+ u32 command;
+
+/* EHCI 1.1 addendum */
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK (1<<11) /* enable "park" on async qh */
+#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
+#define CMD_ASE (1<<5) /* async schedule enable */
+#define CMD_PSE (1<<4) /* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET (1<<1) /* reset HC not bus */
+#define CMD_RUN (1<<0) /* start/stop HC */
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
+#define STS_ASS (1<<15) /* Async Schedule Status */
+#define STS_PSS (1<<14) /* Periodic Schedule Status */
+#define STS_RECL (1<<13) /* Reclamation */
+#define STS_HALT (1<<12) /* Not running (any reason) */
+/* some bits reserved */
+ /* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA (1<<5) /* Interrupted on async advance */
+#define STS_FATAL (1<<4) /* such as some PCI access errors */
+#define STS_FLR (1<<3) /* frame list rolled over */
+#define STS_PCD (1<<2) /* port change detect */
+#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
+#define STS_INT (1<<0) /* "normal" completion (short, ...) */
+
+ /* USBINTR: offset 0x08 */
+ u32 intr_enable;
+
+ /* FRINDEX: offset 0x0C */
+ u32 frame_index; /* current microframe number */
+ /* CTRLDSSEGMENT: offset 0x10 */
+ u32 segment; /* address bits 63:32 if needed */
+ /* PERIODICLISTBASE: offset 0x14 */
+ u32 frame_list; /* points to periodic list */
+ /* ASYNCLISTADDR: offset 0x18 */
+ u32 async_next; /* address of next async queue head */
+
+ u32 reserved1;
+ /* PORTSC: offset 0x20 */
+ u32 port_status;
+/* 31:23 reserved */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+#define PORT_RESET (1<<8) /* reset port */
+#define PORT_SUSPEND (1<<7) /* suspend port */
+#define PORT_RESUME (1<<6) /* resume it */
+#define PORT_PEC (1<<3) /* port enable change */
+#define PORT_PE (1<<2) /* port enable */
+#define PORT_CSC (1<<1) /* connect status change */
+#define PORT_CONNECT (1<<0) /* device connected */
+#define PORT_RWC_BITS (PORT_CSC | PORT_PEC)
+
+ u32 reserved2[3];
+
+ /* BMCSR: offset 0x30 */
+	u32		bmcsr; /* Bus Monitor Control/Status Register */
+#define BMCSR_HOST_SPD_TYP (3<<9)
+#define BMCSR_VBUS_OFF (1<<4)
+#define BMCSR_INT_POLARITY (1<<3)
+
+ /* BMISR: offset 0x34 */
+	u32		bmisr; /* Bus Monitor Interrupt Status Register */
+#define BMISR_OVC (1<<1)
+
+ /* BMIER: offset 0x38 */
+	u32		bmier; /* Bus Monitor Interrupt Enable Register */
+#define BMIER_OVC_EN (1<<1)
+#define BMIER_VBUS_ERR_EN (1<<0)
+};
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct fusbh200_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+};
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+#include <linux/init.h>
+extern int __init early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+struct usb_hcd;
+
+static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return 1; /* Shouldn't this be 0? */
+}
+
+static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from fusbh200 host driver to fusbh200 debug driver */
+extern int dbgp_external_startup(struct usb_hcd *);
+extern int dbgp_reset_prep(struct usb_hcd *hcd);
+#else
+static inline int dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return xen_dbgp_reset_prep(hcd);
+}
+static inline int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd);
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define QTD_NEXT(fusbh200, dma) cpu_to_hc32(fusbh200, (u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct fusbh200_qtd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.5.1 */
+ __hc32 hw_alt_next; /* see EHCI 3.5.2 */
+ __hc32 hw_token; /* see EHCI 3.5.3 */
+#define QTD_TOGGLE (1 << 31) /* data toggle */
+#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
+#define QTD_IOC (1 << 15) /* interrupt on complete */
+#define QTD_CERR(tok) (((tok)>>10) & 0x3)
+#define QTD_PID(tok) (((tok)>>8) & 0x3)
+#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
+#define QTD_STS_HALT (1 << 6) /* halted on error */
+#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
+#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
+#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
+#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
+#define QTD_STS_STS (1 << 1) /* split transaction state */
+#define QTD_STS_PING (1 << 0) /* issue PING? */
+
+#define ACTIVE_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_ACTIVE)
+#define HALT_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_HALT)
+#define STATUS_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_STS)
+
+ __hc32 hw_buf [5]; /* see EHCI 3.5.4 */
+ __hc32 hw_buf_hi [5]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t qtd_dma; /* qtd address */
+ struct list_head qtd_list; /* sw qtd list */
+ struct urb *urb; /* qtd's urb */
+ size_t length; /* length of buffer */
+} __attribute__ ((aligned (32)));
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK(fusbh200) cpu_to_hc32 (fusbh200, ~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
+
+/*-------------------------------------------------------------------------*/
+
+/* type tag from {qh,itd,fstn}->hw_next */
+#define Q_NEXT_TYPE(fusbh200,dma) ((dma) & cpu_to_hc32(fusbh200, 3 << 1))
+
+/*
+ * Now the following defines are not converted using the
+ * cpu_to_le32() macro anymore, since we have to support
+ * "dynamic" switching between be and le support, so that the driver
+ * can be used on one system with SoC EHCI controller using big-endian
+ * descriptors as well as a normal little-endian PCI EHCI controller.
+ */
+/* values for that type tag */
+#define Q_TYPE_ITD (0 << 1)
+#define Q_TYPE_QH (1 << 1)
+#define Q_TYPE_SITD (2 << 1)
+#define Q_TYPE_FSTN (3 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(fusbh200,dma) (cpu_to_hc32(fusbh200, (((u32)dma)&~0x01f)|Q_TYPE_QH))
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define FUSBH200_LIST_END(fusbh200) cpu_to_hc32(fusbh200, 1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure. That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule. Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union fusbh200_shadow {
+ struct fusbh200_qh *qh; /* Q_TYPE_QH */
+ struct fusbh200_itd *itd; /* Q_TYPE_ITD */
+ struct fusbh200_fstn *fstn; /* Q_TYPE_FSTN */
+ __hc32 *hw_next; /* (all types) */
+ void *ptr;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+/* first part defined by EHCI spec */
+struct fusbh200_qh_hw {
+ __hc32 hw_next; /* see EHCI 3.6.1 */
+ __hc32 hw_info1; /* see EHCI 3.6.2 */
+#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
+#define QH_HEAD (1 << 15) /* Head of async reclamation list */
+#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
+#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
+#define QH_LOW_SPEED (1 << 12)
+#define QH_FULL_SPEED (0 << 12)
+#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
+ __hc32 hw_info2; /* see EHCI 3.6.2 */
+#define QH_SMASK 0x000000ff
+#define QH_CMASK 0x0000ff00
+#define QH_HUBADDR 0x007f0000
+#define QH_HUBPORT 0x3f800000
+#define QH_MULT 0xc0000000
+ __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
+
+ /* qtd overlay (hardware parts of a struct fusbh200_qtd) */
+ __hc32 hw_qtd_next;
+ __hc32 hw_alt_next;
+ __hc32 hw_token;
+ __hc32 hw_buf [5];
+ __hc32 hw_buf_hi [5];
+} __attribute__ ((aligned(32)));
+
+struct fusbh200_qh {
+ struct fusbh200_qh_hw *hw; /* Must come first */
+ /* the rest is HCD-private */
+ dma_addr_t qh_dma; /* address of qh */
+ union fusbh200_shadow qh_next; /* ptr to qh; or periodic */
+ struct list_head qtd_list; /* sw qtd list */
+ struct list_head intr_node; /* list of intr QHs */
+ struct fusbh200_qtd *dummy;
+ struct fusbh200_qh *unlink_next; /* next on unlink list */
+
+ unsigned unlink_cycle;
+
+ u8 needs_rescan; /* Dequeue during giveback */
+ u8 qh_state;
+#define QH_STATE_LINKED 1 /* HC sees this */
+#define QH_STATE_UNLINK 2 /* HC may still see this */
+#define QH_STATE_IDLE 3 /* HC doesn't see this */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
+#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
+
+ u8 xacterrs; /* XactErr retry counter */
+#define QH_XACTERR_MAX 32 /* XactErr retry limit */
+
+ /* periodic schedule info */
+ u8 usecs; /* intr bandwidth */
+ u8 gap_uf; /* uframes split/csplit gap */
+ u8 c_usecs; /* ... split completion bw */
+ u16 tt_usecs; /* tt downstream bandwidth */
+ unsigned short period; /* polling interval */
+ unsigned short start; /* where polling starts */
+#define NO_FRAME ((unsigned short)~0) /* pick new start */
+
+ struct usb_device *dev; /* access to TT */
+ unsigned is_out:1; /* bulk or intr OUT */
+ unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* description of one iso transaction (up to 3 KB data if highspeed) */
+struct fusbh200_iso_packet {
+ /* These will be copied to iTD when scheduling */
+ u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
+ __hc32 transaction; /* itd->hw_transaction[i] |= */
+ u8 cross; /* buf crosses pages */
+ /* for full speed OUT splits */
+ u32 buf1;
+};
+
+/* temporary schedule data for packets from iso urbs (both speeds)
+ * each packet is one logical usb transaction to the device (not TT),
+ * beginning at stream->next_uframe
+ */
+struct fusbh200_iso_sched {
+ struct list_head td_list;
+ unsigned span;
+ struct fusbh200_iso_packet packet [0];
+};
+
+/*
+ * fusbh200_iso_stream - groups all (s)itds for this endpoint.
+ * acts like a qh would, if EHCI had them for ISO.
+ */
+struct fusbh200_iso_stream {
+	/* first field matches fusbh200_qh, but is NULL */
+ struct fusbh200_qh_hw *hw;
+
+ u8 bEndpointAddress;
+ u8 highspeed;
+ struct list_head td_list; /* queued itds */
+ struct list_head free_list; /* list of unused itds */
+ struct usb_device *udev;
+ struct usb_host_endpoint *ep;
+
+ /* output of (re)scheduling */
+ int next_uframe;
+ __hc32 splits;
+
+ /* the rest is derived from the endpoint descriptor,
+ * trusting urb->interval == f(epdesc->bInterval) and
+ * including the extra info for hw_bufp[0..2]
+ */
+ u8 usecs, c_usecs;
+ u16 interval;
+ u16 tt_usecs;
+ u16 maxp;
+ u16 raw_mask;
+ unsigned bandwidth;
+
+ /* This is used to initialize iTD's hw_bufp fields */
+ __hc32 buf0;
+ __hc32 buf1;
+ __hc32 buf2;
+
+ /* this is used to initialize sITD's tt info */
+ __hc32 address;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.3
+ * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
+ *
+ * Schedule records for high speed iso xfers
+ */
+struct fusbh200_itd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.3.1 */
+ __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */
+#define FUSBH200_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
+#define FUSBH200_ISOC_BUF_ERR (1<<30) /* Data buffer error */
+#define FUSBH200_ISOC_BABBLE (1<<29) /* babble detected */
+#define FUSBH200_ISOC_XACTERR (1<<28) /* XactErr - transaction error */
+#define FUSBH200_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
+#define FUSBH200_ITD_IOC (1 << 15) /* interrupt on complete */
+
+#define ITD_ACTIVE(fusbh200) cpu_to_hc32(fusbh200, FUSBH200_ISOC_ACTIVE)
+
+ __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */
+ __hc32 hw_bufp_hi [7]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t itd_dma; /* for this itd */
+ union fusbh200_shadow itd_next; /* ptr to periodic q entry */
+
+ struct urb *urb;
+ struct fusbh200_iso_stream *stream; /* endpoint's queue */
+ struct list_head itd_list; /* list of stream's itds */
+
+ /* any/all hw_transactions here may be used by that urb */
+ unsigned frame; /* where scheduled */
+ unsigned pg;
+ unsigned index[8]; /* in urb->iso_frame_desc */
+} __attribute__ ((aligned (32)));
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.96 Section 3.7
+ * Periodic Frame Span Traversal Node (FSTN)
+ *
+ * Manages split interrupt transactions (using TT) that span frame boundaries
+ * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN
+ * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
+ * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
+ */
+struct fusbh200_fstn {
+ __hc32 hw_next; /* any periodic q entry */
+ __hc32 hw_prev; /* qh or FUSBH200_LIST_END */
+
+ /* the rest is HCD-private */
+ dma_addr_t fstn_dma;
+ union fusbh200_shadow fstn_next; /* ptr to periodic q entry */
+} __attribute__ ((aligned (32)));
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define fusbh200_prepare_ports_for_controller_suspend(fusbh200, do_wakeup) \
+ fusbh200_adjust_port_wakeup_flags(fusbh200, true, do_wakeup);
+
+#define fusbh200_prepare_ports_for_controller_resume(fusbh200) \
+ fusbh200_adjust_port_wakeup_flags(fusbh200, false, false);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Some EHCI controllers have a Transaction Translator built into the
+ * root hub. This is a non-standard feature. Each controller will need
+ * to add code to the following inline functions, and call them as
+ * needed (mostly in root hub code).
+ */
+
+static inline unsigned int
+fusbh200_get_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
+{
+ return (readl(&fusbh200->regs->bmcsr)
+ & BMCSR_HOST_SPD_TYP) >> 9;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int
+fusbh200_port_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
+{
+ switch (fusbh200_get_speed(fusbh200, portsc)) {
+ case 0:
+ return 0;
+ case 1:
+ return USB_PORT_STAT_LOW_SPEED;
+ case 2:
+ default:
+ return USB_PORT_STAT_HIGH_SPEED;
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define fusbh200_has_fsl_portno_bug(e) (0)
+
+/*
+ * While most USB host controllers implement their registers in
+ * little-endian format, a minority (celleb companion chip) implement
+ * them in big endian format.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ *
+ */
+
+#define fusbh200_big_endian_mmio(e) 0
+#define fusbh200_big_endian_capbase(e) 0
+
+static inline unsigned int fusbh200_readl(const struct fusbh200_hcd *fusbh200,
+ __u32 __iomem * regs)
+{
+ return readl(regs);
+}
+
+static inline void fusbh200_writel(const struct fusbh200_hcd *fusbh200,
+ const unsigned int val, __u32 __iomem *regs)
+{
+ writel(val, regs);
+}
+
+/* cpu to fusbh200 */
+static inline __hc32 cpu_to_hc32 (const struct fusbh200_hcd *fusbh200, const u32 x)
+{
+ return cpu_to_le32(x);
+}
+
+/* fusbh200 to cpu */
+static inline u32 hc32_to_cpu (const struct fusbh200_hcd *fusbh200, const __hc32 x)
+{
+ return le32_to_cpu(x);
+}
+
+static inline u32 hc32_to_cpup (const struct fusbh200_hcd *fusbh200, const __hc32 *x)
+{
+ return le32_to_cpup(x);
+}
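+/*
+ * Usage sketch (illustration only): in-memory descriptor fields are kept in
+ * controller byte order (__hc32), so they are converted at each access with
+ * the helpers above.  The hw_field pointer here is hypothetical.
+ */
+#if 0	/* example only */
+static inline void example_hc32_access(struct fusbh200_hcd *fusbh200,
+		__hc32 *hw_field)
+{
+	u32 v = hc32_to_cpup(fusbh200, hw_field);	/* controller -> CPU order */
+
+	*hw_field = cpu_to_hc32(fusbh200, v);		/* CPU -> controller order */
+}
+#endif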
+
+/*-------------------------------------------------------------------------*/
+
+static inline unsigned fusbh200_read_frame_index(struct fusbh200_hcd *fusbh200)
+{
+ return fusbh200_readl(fusbh200, &fusbh200->regs->frame_index);
+}
+
+#define fusbh200_itdlen(urb, desc, t) ({ \
+ usb_pipein((urb)->pipe) ? \
+ (desc)->length - FUSBH200_ITD_LENGTH(t) : \
+ FUSBH200_ITD_LENGTH(t); \
+})
+/*-------------------------------------------------------------------------*/
+
+#endif /* __LINUX_FUSBH200_H */
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 8582236e4ca..d0d8fadf706 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -54,7 +54,7 @@
* DWA).
*/
#include <linux/kernel.h>
-#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
@@ -85,7 +85,7 @@ static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
cluster_id,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
cluster_id, result);
@@ -105,7 +105,7 @@ static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
interval << 8 | slots,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -158,8 +158,15 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd)
goto error_set_cluster_id;
usb_hcd->uses_new_polling = 1;
- usb_hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
usb_hcd->state = HC_STATE_RUNNING;
+
+ /*
+ * prevent USB core from suspending the root hub since
+ * bus_suspend and bus_resume are not yet supported.
+ */
+ pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev);
+
result = 0;
out:
mutex_unlock(&wusbhc->mutex);
@@ -172,25 +179,6 @@ error_cluster_id_get:
}
-static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
- usb_hcd, hwahc, *(unsigned long *) &msg);
- return -ENOSYS;
-}
-
-static int hwahc_op_resume(struct usb_hcd *usb_hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
- dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
- usb_hcd, hwahc);
- return -ENOSYS;
-}
-
/*
* No need to abort pipes, as when this is called, all the children
* has been disconnected and that has done it [through
@@ -210,10 +198,14 @@ static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
- dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
- usb_hcd, hwahc);
- return -ENOSYS;
+ /*
+ * We cannot query the HWA for the WUSB time since that requires sending
+ * a synchronous URB and this function can be called in_interrupt.
+ * Instead, query the USB frame number for our parent and use that.
+ */
+ return usb_get_current_frame_number(wa->usb_dev);
}
static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
@@ -231,7 +223,7 @@ static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- return wa_urb_dequeue(&hwahc->wa, urb);
+ return wa_urb_dequeue(&hwahc->wa, urb, status);
}
/*
@@ -269,8 +261,44 @@ static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
dev_err(dev, "cannot listen to notifications: %d\n", result);
goto error_stop;
}
+ /*
+ * If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set,
+ * disable transfer notifications.
+ */
+ if (hwahc->wa.quirks &
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) {
+ struct usb_host_interface *cur_altsetting =
+ hwahc->wa.usb_iface->cur_altsetting;
+
+ result = usb_control_msg(hwahc->wa.usb_dev,
+ usb_sndctrlpipe(hwahc->wa.usb_dev, 0),
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ WA_REQ_ALEREON_FEATURE_SET,
+ cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ /*
+ * If we successfully sent the control message, start DTI here
+ * because no transfer notifications will be received which is
+ * where DTI is normally started.
+ */
+ if (result == 0)
+ result = wa_dti_start(&hwahc->wa);
+ else
+ result = 0; /* OK. Continue normally. */
+
+ if (result < 0) {
+ dev_err(dev, "cannot start DTI: %d\n", result);
+ goto error_dti_start;
+ }
+ }
+
return result;
+error_dti_start:
+ wa_nep_disarm(&hwahc->wa);
error_stop:
__wa_clear_feature(&hwahc->wa, WA_ENABLE);
return result;
@@ -288,7 +316,7 @@ static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
delay * 1000,
iface_no,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret == 0)
msleep(delay);
@@ -317,7 +345,7 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
stream_index,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
goto out;
@@ -328,7 +356,7 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
WUSB_REQ_SET_WUSB_MAS,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- mas_le, 32, 1000 /* FIXME: arbitrary */);
+ mas_le, 32, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
out:
@@ -362,7 +390,7 @@ static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
interval << 8 | repeat_cnt,
handle << 8 | iface_no,
- wuie, wuie->bLength, 1000 /* FIXME: arbitrary */);
+ wuie, wuie->bLength, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -379,7 +407,7 @@ static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
WUSB_REQ_REMOVE_MMC_IE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, handle << 8 | iface_no,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -422,7 +450,7 @@ static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wusb_dev->port_idx << 8 | iface_no,
dev_info, sizeof(struct hwa_dev_info),
- 1000 /* FIXME: arbitrary */);
+ USB_CTRL_SET_TIMEOUT);
kfree(dev_info);
return ret;
}
@@ -462,10 +490,9 @@ static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
USB_DT_KEY << 8 | key_idx,
port_idx << 8 | iface_no,
- keyd, keyd_len, 1000 /* FIXME: arbitrary */);
+ keyd, keyd_len, USB_CTRL_SET_TIMEOUT);
- memset(keyd, 0, sizeof(*keyd)); /* clear keys etc. */
- kfree(keyd);
+ kzfree(keyd); /* clear keys etc. */
return result;
}
@@ -500,12 +527,12 @@ static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
encryption_value = 0;
}
- /* Set the encryption type for commmunicating with the device */
+ /* Set the encryption type for communicating with the device */
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
encryption_value, port_idx << 8 | iface_no,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
"port index %u to %s (value %d): %d\n", port_idx,
@@ -578,14 +605,10 @@ found:
goto error;
}
wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
- /* Make LE fields CPU order */
- wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion);
- wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes);
- wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock);
- if (wa_descr->bcdWAVersion > 0x0100)
+ if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100)
dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
- wa_descr->bcdWAVersion & 0xff00 >> 8,
- wa_descr->bcdWAVersion & 0x00ff);
+ le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00 >> 8,
+ le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff);
result = 0;
error:
return result;
@@ -596,11 +619,9 @@ static struct hc_driver hwahc_hc_driver = {
.product_desc = "Wireless USB HWA host controller",
.hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
.irq = NULL, /* FIXME */
- .flags = HCD_USB2, /* FIXME */
+ .flags = HCD_USB25,
.reset = hwahc_op_reset,
.start = hwahc_op_start,
- .pci_suspend = hwahc_op_suspend,
- .pci_resume = hwahc_op_resume,
.stop = hwahc_op_stop,
.get_frame_number = hwahc_op_get_frame_number,
.urb_enqueue = hwahc_op_urb_enqueue,
@@ -609,8 +630,6 @@ static struct hc_driver hwahc_hc_driver = {
.hub_status_data = wusbhc_rh_status_data,
.hub_control = wusbhc_rh_control,
- .bus_suspend = wusbhc_rh_suspend,
- .bus_resume = wusbhc_rh_resume,
.start_port_reset = wusbhc_rh_start_port_reset,
};
@@ -695,7 +714,8 @@ static void hwahc_security_release(struct hwahc *hwahc)
/* nothing to do here so far... */
}
-static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
+static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface,
+ kernel_ulong_t quirks)
{
int result;
struct device *dev = &iface->dev;
@@ -706,12 +726,9 @@ static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
wa->usb_iface = usb_get_intf(iface);
wusbhc->dev = dev;
- wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent);
- if (wusbhc->uwb_rc == NULL) {
- result = -ENODEV;
- dev_err(dev, "Cannot get associated UWB Host Controller\n");
- goto error_rc_get;
- }
+ /* defer getting the uwb_rc handle until it is needed since it
+ * may not have been registered by the hwa_rc driver yet. */
+ wusbhc->uwb_rc = NULL;
result = wa_fill_descr(wa); /* Get the device descriptor */
if (result < 0)
goto error_fill_descriptor;
@@ -743,7 +760,7 @@ static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
goto error_wusbhc_create;
}
- result = wa_create(&hwahc->wa, iface);
+ result = wa_create(&hwahc->wa, iface, quirks);
if (result < 0)
goto error_wa_create;
return 0;
@@ -754,8 +771,6 @@ error_wusbhc_create:
/* WA Descr fill allocs no resources */
error_security_create:
error_fill_descriptor:
- uwb_rc_put(wusbhc->uwb_rc);
-error_rc_get:
usb_put_intf(iface);
usb_put_dev(usb_dev);
return result;
@@ -797,11 +812,11 @@ static int hwahc_probe(struct usb_interface *usb_iface,
goto error_alloc;
}
usb_hcd->wireless = 1;
- usb_hcd->flags |= HCD_FLAG_SAW_IRQ;
+ usb_hcd->self.sg_tablesize = ~0;
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
- result = hwahc_create(hwahc, usb_iface);
+ result = hwahc_create(hwahc, usb_iface, id->driver_info);
if (result < 0) {
dev_err(dev, "Cannot initialize internals: %d\n", result);
goto error_hwahc_create;
@@ -811,6 +826,7 @@ static int hwahc_probe(struct usb_interface *usb_iface,
dev_err(dev, "Cannot add HCD: %d\n", result);
goto error_add_hcd;
}
+ device_wakeup_enable(usb_hcd->self.controller);
result = wusbhc_b_create(&hwahc->wusbhc);
if (result < 0) {
dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
@@ -845,6 +861,14 @@ static void hwahc_disconnect(struct usb_interface *usb_iface)
}
static struct usb_device_id hwahc_id_table[] = {
+ /* Alereon 5310 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01),
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
+ /* Alereon 5611 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01),
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
/* FIXME: use class labels for this */
{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
{},
@@ -858,18 +882,7 @@ static struct usb_driver hwahc_driver = {
.id_table = hwahc_id_table,
};
-static int __init hwahc_driver_init(void)
-{
- return usb_register(&hwahc_driver);
-}
-module_init(hwahc_driver_init);
-
-static void __exit hwahc_driver_exit(void)
-{
- usb_deregister(&hwahc_driver);
-}
-module_exit(hwahc_driver_exit);
-
+module_usb_driver(hwahc_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
new file mode 100644
index 00000000000..4f320d050da
--- /dev/null
+++ b/drivers/usb/host/imx21-dbg.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2009 by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* this file is part of imx21-hcd.c */
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
+#ifndef DEBUG
+
+static inline void create_debug_files(struct imx21 *imx21) { }
+static inline void remove_debug_files(struct imx21 *imx21) { }
+static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
+ int status) {}
+static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
+ struct urb *urb) {}
+static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
+ struct urb *urb) {}
+static inline void debug_etd_allocated(struct imx21 *imx21) {}
+static inline void debug_etd_freed(struct imx21 *imx21) {}
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
+static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
+static inline void debug_isoc_submitted(struct imx21 *imx21,
+ int frame, struct td *td) {}
+static inline void debug_isoc_completed(struct imx21 *imx21,
+ int frame, struct td *td, int cc, int len) {}
+
+#else
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static const char *dir_labels[] = {
+ "TD 0",
+ "OUT",
+ "IN",
+ "TD 1"
+};
+
+static const char *speed_labels[] = {
+ "Full",
+ "Low"
+};
+
+static const char *format_labels[] = {
+ "Control",
+ "ISO",
+ "Bulk",
+ "Interrupt"
+};
+
+static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
+ struct urb *urb)
+{
+ return usb_pipeisoc(urb->pipe) ?
+ &imx21->isoc_stats : &imx21->nonisoc_stats;
+}
+
+static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->submitted++;
+}
+
+static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
+{
+ if (st)
+ stats_for_urb(imx21, urb)->completed_failed++;
+ else
+ stats_for_urb(imx21, urb)->completed_ok++;
+}
+
+static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->unlinked++;
+}
+
+static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->queue_etd++;
+}
+
+static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->queue_dmem++;
+}
+
+static inline void debug_etd_allocated(struct imx21 *imx21)
+{
+ imx21->etd_usage.maximum = max(
+ ++(imx21->etd_usage.value),
+ imx21->etd_usage.maximum);
+}
+
+static inline void debug_etd_freed(struct imx21 *imx21)
+{
+ imx21->etd_usage.value--;
+}
+
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
+{
+ imx21->dmem_usage.value += size;
+ imx21->dmem_usage.maximum = max(
+ imx21->dmem_usage.value,
+ imx21->dmem_usage.maximum);
+}
+
+static inline void debug_dmem_freed(struct imx21 *imx21, int size)
+{
+ imx21->dmem_usage.value -= size;
+}
+
+
+static void debug_isoc_submitted(struct imx21 *imx21,
+ int frame, struct td *td)
+{
+ struct debug_isoc_trace *trace = &imx21->isoc_trace[
+ imx21->isoc_trace_index++];
+
+ imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
+ trace->schedule_frame = td->frame;
+ trace->submit_frame = frame;
+ trace->request_len = td->len;
+ trace->td = td;
+}
+
+static inline void debug_isoc_completed(struct imx21 *imx21,
+ int frame, struct td *td, int cc, int len)
+{
+ struct debug_isoc_trace *trace, *trace_failed;
+ int i;
+ int found = 0;
+
+ trace = imx21->isoc_trace;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
+ if (trace->td == td) {
+ trace->done_frame = frame;
+ trace->done_len = len;
+ trace->cc = cc;
+ trace->td = NULL;
+ found = 1;
+ break;
+ }
+ }
+
+ if (found && cc) {
+ trace_failed = &imx21->isoc_trace_failed[
+ imx21->isoc_trace_index_failed++];
+
+ imx21->isoc_trace_index_failed %= ARRAY_SIZE(
+ imx21->isoc_trace_failed);
+ *trace_failed = *trace;
+ }
+}
+
+
+static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
+{
+ if (ep)
+ snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
+ ep->desc.bEndpointAddress,
+ usb_endpoint_type(&ep->desc),
+ ep);
+ else
+ snprintf(buf, bufsize, "none");
+ return buf;
+}
+
+static char *format_etd_dword0(u32 value, char *buf, int bufsize)
+{
+ snprintf(buf, bufsize,
+ "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
+ value & 0x7F,
+ (value >> DW0_ENDPNT) & 0x0F,
+ dir_labels[(value >> DW0_DIRECT) & 0x03],
+ speed_labels[(value >> DW0_SPEED) & 0x01],
+ format_labels[(value >> DW0_FORMAT) & 0x03],
+ (value >> DW0_HALTED) & 0x01);
+ return buf;
+}
+
+static int debug_status_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ int etds_allocated = 0;
+ int etds_sw_busy = 0;
+ int etds_hw_busy = 0;
+ int dmem_blocks = 0;
+ int queued_for_etd = 0;
+ int queued_for_dmem = 0;
+ unsigned int dmem_bytes = 0;
+ int i;
+ struct etd_priv *etd;
+ u32 etd_enable_mask;
+ unsigned long flags;
+ struct imx21_dmem_area *dmem;
+ struct ep_priv *ep_priv;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
+ for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+ if (etd->alloc)
+ etds_allocated++;
+ if (etd->urb)
+ etds_sw_busy++;
+ if (etd_enable_mask & (1<<i))
+ etds_hw_busy++;
+ }
+
+ list_for_each_entry(dmem, &imx21->dmem_list, list) {
+ dmem_bytes += dmem->size;
+ dmem_blocks++;
+ }
+
+ list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
+ queued_for_etd++;
+
+ list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
+ queued_for_dmem++;
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ seq_printf(s,
+ "Frame: %d\n"
+ "ETDs allocated: %d/%d (max=%d)\n"
+ "ETDs in use sw: %d\n"
+ "ETDs in use hw: %d\n"
+ "DMEM allocated: %d/%d (max=%d)\n"
+ "DMEM blocks: %d\n"
+ "Queued waiting for ETD: %d\n"
+ "Queued waiting for DMEM: %d\n",
+ readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
+ etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
+ etds_sw_busy,
+ etds_hw_busy,
+ dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
+ dmem_blocks,
+ queued_for_etd,
+ queued_for_dmem);
+
+ return 0;
+}
+
+static int debug_dmem_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct imx21_dmem_area *dmem;
+ unsigned long flags;
+ char ep_text[40];
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ list_for_each_entry(dmem, &imx21->dmem_list, list)
+ seq_printf(s,
+ "%04X: size=0x%X "
+ "ep=%s\n",
+ dmem->offset, dmem->size,
+ format_ep(dmem->ep, ep_text, sizeof(ep_text)));
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static int debug_etd_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct etd_priv *etd;
+ char buf[60];
+ u32 dword;
+ int i, j;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+ int state = -1;
+ struct urb_priv *urb_priv;
+ if (etd->urb) {
+ urb_priv = etd->urb->hcpriv;
+ if (urb_priv)
+ state = urb_priv->state;
+ }
+
+ seq_printf(s,
+ "etd_num: %d\n"
+ "ep: %s\n"
+ "alloc: %d\n"
+ "len: %d\n"
+ "busy sw: %d\n"
+ "busy hw: %d\n"
+ "urb state: %d\n"
+ "current urb: %p\n",
+
+ i,
+ format_ep(etd->ep, buf, sizeof(buf)),
+ etd->alloc,
+ etd->len,
+ etd->urb != NULL,
+ (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
+ state,
+ etd->urb);
+
+ for (j = 0; j < 4; j++) {
+ dword = etd_readl(imx21, i, j);
+ switch (j) {
+ case 0:
+ format_etd_dword0(dword, buf, sizeof(buf));
+ break;
+ case 2:
+ snprintf(buf, sizeof(buf),
+ "cc=0X%02X", dword >> DW2_COMPCODE);
+ break;
+ default:
+ *buf = 0;
+ break;
+ }
+ seq_printf(s,
+ "dword %d: submitted=%08X cur=%08X [%s]\n",
+ j,
+ etd->submitted_dwords[j],
+ dword,
+ buf);
+ }
+ seq_printf(s, "\n");
+ }
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void debug_statistics_show_one(struct seq_file *s,
+ const char *name, struct debug_stats *stats)
+{
+ seq_printf(s, "%s:\n"
+ "submitted URBs: %lu\n"
+ "completed OK: %lu\n"
+ "completed failed: %lu\n"
+ "unlinked: %lu\n"
+ "queued for ETD: %lu\n"
+ "queued for DMEM: %lu\n\n",
+ name,
+ stats->submitted,
+ stats->completed_ok,
+ stats->completed_failed,
+ stats->unlinked,
+ stats->queue_etd,
+ stats->queue_dmem);
+}
+
+static int debug_statistics_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
+ debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
+ seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void debug_isoc_show_one(struct seq_file *s,
+ const char *name, int index, struct debug_isoc_trace *trace)
+{
+ seq_printf(s, "%s %d:\n"
+ "cc=0X%02X\n"
+ "scheduled frame %d (%d)\n"
+ "submitted frame %d (%d)\n"
+ "completed frame %d (%d)\n"
+ "requested length=%d\n"
+ "completed length=%d\n\n",
+ name, index,
+ trace->cc,
+ trace->schedule_frame, trace->schedule_frame & 0xFFFF,
+ trace->submit_frame, trace->submit_frame & 0xFFFF,
+ trace->done_frame, trace->done_frame & 0xFFFF,
+ trace->request_len,
+ trace->done_len);
+}
+
+static int debug_isoc_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct debug_isoc_trace *trace;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ trace = imx21->isoc_trace_failed;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
+ debug_isoc_show_one(s, "isoc failed", i, trace);
+
+ trace = imx21->isoc_trace;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
+ debug_isoc_show_one(s, "isoc", i, trace);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static int debug_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_status_show, inode->i_private);
+}
+
+static int debug_dmem_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_dmem_show, inode->i_private);
+}
+
+static int debug_etd_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_etd_show, inode->i_private);
+}
+
+static int debug_statistics_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_statistics_show, inode->i_private);
+}
+
+static int debug_isoc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_isoc_show, inode->i_private);
+}
+
+static const struct file_operations debug_status_fops = {
+ .open = debug_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_dmem_fops = {
+ .open = debug_dmem_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_etd_fops = {
+ .open = debug_etd_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_statistics_fops = {
+ .open = debug_statistics_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_isoc_fops = {
+ .open = debug_isoc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void create_debug_files(struct imx21 *imx21)
+{
+ imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
+ if (!imx21->debug_root)
+ goto failed_create_rootdir;
+
+ if (!debugfs_create_file("status", S_IRUGO,
+ imx21->debug_root, imx21, &debug_status_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("dmem", S_IRUGO,
+ imx21->debug_root, imx21, &debug_dmem_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("etd", S_IRUGO,
+ imx21->debug_root, imx21, &debug_etd_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("statistics", S_IRUGO,
+ imx21->debug_root, imx21, &debug_statistics_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("isoc", S_IRUGO,
+ imx21->debug_root, imx21, &debug_isoc_fops))
+ goto failed_create;
+
+ return;
+
+failed_create:
+ debugfs_remove_recursive(imx21->debug_root);
+
+failed_create_rootdir:
+ imx21->debug_root = NULL;
+}
+
+
+static void remove_debug_files(struct imx21 *imx21)
+{
+ if (imx21->debug_root) {
+ debugfs_remove_recursive(imx21->debug_root);
+ imx21->debug_root = NULL;
+ }
+}
+
+#endif
+
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
new file mode 100644
index 00000000000..207bad99301
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.c
@@ -0,0 +1,1947 @@
+/*
+ * USB Host Controller Driver for IMX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+ /*
+ * The i.MX21 USB hardware contains
+ * * 32 transfer descriptors (called ETDs)
+ * * 4KB of data memory
+ *
+ * The data memory is shared between the host and function controllers
+ * (but this driver only supports the host controller)
+ *
+ * So setting up a transfer involves:
+ * * Allocating an ETD
+ * * Filling in the ETD with the appropriate information
+ * * Allocating data memory (and putting the offset in the ETD)
+ * * Activating the ETD
+ * * Getting an interrupt when done.
+ *
+ * An ETD is assigned to each active endpoint.
+ *
+ * Low resource (ETD and data memory) situations are handled differently for
+ * isochronous and non-isochronous transactions:
+ *
+ * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
+ *
+ * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
+ * They allocate both ETDs and Data memory during URB submission
+ * (and fail if unavailable).
+ */
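+/*
+ * A condensed sketch of that sequence (illustration only), using helpers
+ * defined later in this file.  The real submission paths
+ * (schedule_nonisoc_etd() and friends) also handle allocation failures,
+ * queuing on resource shortage and the X/Y double buffers.
+ */
+#if 0	/* sketch only */
+static void example_setup_transfer(struct imx21 *imx21, struct urb *urb,
+		u8 dir, u16 maxpacket)
+{
+	int etd_num = alloc_etd(imx21);			/* 1. allocate an ETD */
+	struct etd_priv *etd = &imx21->etd[etd_num];
+
+	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);	/* 2. fill it in */
+	etd->dmem_size = maxpacket;
+	etd->dmem_offset = alloc_dmem(imx21, maxpacket, urb->ep); /* 3. data memory */
+	etd_writel(imx21, etd_num, 1,			/* X/Y buffer offsets */
+		((etd->dmem_offset + maxpacket) << DW1_YBUFSRTAD) | etd->dmem_offset);
+	activate_etd(imx21, etd_num, dir);	/* 4. activate; 5. done interrupt follows */
+}
+#endif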
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include "imx21-hcd.h"
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
+#ifdef DEBUG
+#define DEBUG_LOG_FRAME(imx21, etd, event) \
+ (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
+#else
+#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
+#endif
+
+static const char hcd_name[] = "imx21-hcd";
+
+static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
+{
+ return (struct imx21 *)hcd->hcd_priv;
+}
+
+
+/* =========================================== */
+/* Hardware access helpers */
+/* =========================================== */
+
+static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+ writel(readl(reg) | mask, reg);
+}
+
+static inline void clear_register_bits(struct imx21 *imx21,
+ u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+ writel(readl(reg) & ~mask, reg);
+}
+
+static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+
+ if (readl(reg) & mask)
+ writel(mask, reg);
+}
+
+static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+
+ if (!(readl(reg) & mask))
+ writel(mask, reg);
+}
+
+static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
+{
+ writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
+static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
+{
+ return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
+static inline int wrap_frame(int counter)
+{
+ return counter & 0xFFFF;
+}
+
+static inline int frame_after(int frame, int after)
+{
+	/* handle wrapping like jiffies time_after() */
+ return (s16)((s16)after - (s16)frame) < 0;
+}
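+/*
+ * Worked example for frame_after() above (illustrative values): with the
+ * 16-bit frame counter just past a wrap, frame 0x0002 still counts as
+ * being after frame 0xFFF0.
+ */
+#if 0	/* example only */
+static void example_frame_after(void)
+{
+	WARN_ON(!frame_after(0x0002, 0xFFF0));	/* true:  0x0002 is 18 frames later  */
+	WARN_ON(frame_after(0xFFF0, 0x0002));	/* false: 0xFFF0 is 18 frames earlier */
+}
+#endif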
+
+static int imx21_hc_get_frame(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+
+ return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
+}
+
+static inline bool unsuitable_for_dma(dma_addr_t addr)
+{
+ return (addr & 3) != 0;
+}
+
+#include "imx21-dbg.c"
+
+static void nonisoc_urb_completed_for_etd(
+ struct imx21 *imx21, struct etd_priv *etd, int status);
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
+
+/* =========================================== */
+/* ETD management */
+/* =========================================== */
+
+static int alloc_etd(struct imx21 *imx21)
+{
+ int i;
+ struct etd_priv *etd = imx21->etd;
+
+ for (i = 0; i < USB_NUM_ETD; i++, etd++) {
+ if (etd->alloc == 0) {
+ memset(etd, 0, sizeof(imx21->etd[0]));
+ etd->alloc = 1;
+ debug_etd_allocated(imx21);
+ return i;
+ }
+ }
+ return -1;
+}
+
+static void disactivate_etd(struct imx21 *imx21, int num)
+{
+ int etd_mask = (1 << num);
+ struct etd_priv *etd = &imx21->etd[num];
+
+ writel(etd_mask, imx21->regs + USBH_ETDENCLR);
+ clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+ writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
+ clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+
+ etd->active_count = 0;
+
+ DEBUG_LOG_FRAME(imx21, etd, disactivated);
+}
+
+static void reset_etd(struct imx21 *imx21, int num)
+{
+ struct etd_priv *etd = imx21->etd + num;
+ int i;
+
+ disactivate_etd(imx21, num);
+
+ for (i = 0; i < 4; i++)
+ etd_writel(imx21, num, i, 0);
+ etd->urb = NULL;
+ etd->ep = NULL;
+ etd->td = NULL;
+ etd->bounce_buffer = NULL;
+}
+
+static void free_etd(struct imx21 *imx21, int num)
+{
+ if (num < 0)
+ return;
+
+ if (num >= USB_NUM_ETD) {
+ dev_err(imx21->dev, "BAD etd=%d!\n", num);
+ return;
+ }
+ if (imx21->etd[num].alloc == 0) {
+ dev_err(imx21->dev, "ETD %d already free!\n", num);
+ return;
+ }
+
+ debug_etd_freed(imx21);
+ reset_etd(imx21, num);
+ memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
+}
+
+
+static void setup_etd_dword0(struct imx21 *imx21,
+ int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
+{
+ etd_writel(imx21, etd_num, 0,
+ ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
+ ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
+ ((u32) dir << DW0_DIRECT) |
+ ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
+ 1 : 0) << DW0_SPEED) |
+ ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
+ ((u32) maxpacket << DW0_MAXPKTSIZ));
+}
+
+/*
+ * Copy a buffer into the controller's data memory (DMEM).
+ * We cannot use memcpy_toio() because the hardware requires 32-bit writes.
+ */
+static void copy_to_dmem(
+ struct imx21 *imx21, int dmem_offset, void *src, int count)
+{
+ void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
+ u32 word = 0;
+ u8 *p = src;
+ int byte = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ byte = i % 4;
+ word += (*p++ << (byte * 8));
+ if (byte == 3) {
+ writel(word, dmem);
+ dmem += 4;
+ word = 0;
+ }
+ }
+
+ if (count && byte != 3)
+ writel(word, dmem);
+}
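+/*
+ * Worked example for copy_to_dmem() above (illustration only): bytes are
+ * packed little-endian into 32-bit words, a full word is written every four
+ * bytes, and a trailing partial word is flushed as-is.
+ */
+#if 0	/* example only */
+static void example_copy_to_dmem(struct imx21 *imx21)
+{
+	u8 src[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
+
+	/* performs writel(0x44332211, ...) then writel(0x00000055, ...) */
+	copy_to_dmem(imx21, 0, src, sizeof(src));
+}
+#endif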
+
+static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
+{
+ u32 etd_mask = 1 << etd_num;
+ struct etd_priv *etd = &imx21->etd[etd_num];
+
+ if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
+ /* For non aligned isoc the condition below is always true */
+ if (etd->len <= etd->dmem_size) {
+ /* Fits into data memory, use PIO */
+ if (dir != TD_DIR_IN) {
+ copy_to_dmem(imx21,
+ etd->dmem_offset,
+ etd->cpu_buffer, etd->len);
+ }
+ etd->dma_handle = 0;
+
+ } else {
+ /* Too big for data memory, use bounce buffer */
+ enum dma_data_direction dmadir;
+
+ if (dir == TD_DIR_IN) {
+ dmadir = DMA_FROM_DEVICE;
+ etd->bounce_buffer = kmalloc(etd->len,
+ GFP_ATOMIC);
+ } else {
+ dmadir = DMA_TO_DEVICE;
+ etd->bounce_buffer = kmemdup(etd->cpu_buffer,
+ etd->len,
+ GFP_ATOMIC);
+ }
+ if (!etd->bounce_buffer) {
+ dev_err(imx21->dev, "failed bounce alloc\n");
+ goto err_bounce_alloc;
+ }
+
+ etd->dma_handle =
+ dma_map_single(imx21->dev,
+ etd->bounce_buffer,
+ etd->len,
+ dmadir);
+ if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
+ dev_err(imx21->dev, "failed bounce map\n");
+ goto err_bounce_map;
+ }
+ }
+ }
+
+ clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+ set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+ if (etd->dma_handle) {
+ set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
+ clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
+ writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
+ set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
+ } else {
+ if (dir != TD_DIR_IN) {
+ /* need to set for ZLP and PIO */
+ set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+ }
+ }
+
+ DEBUG_LOG_FRAME(imx21, etd, activated);
+
+#ifdef DEBUG
+ if (!etd->active_count) {
+ int i;
+ etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
+ etd->disactivated_frame = -1;
+ etd->last_int_frame = -1;
+ etd->last_req_frame = -1;
+
+ for (i = 0; i < 4; i++)
+ etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
+ }
+#endif
+
+ etd->active_count = 1;
+ writel(etd_mask, imx21->regs + USBH_ETDENSET);
+ return;
+
+err_bounce_map:
+ kfree(etd->bounce_buffer);
+
+err_bounce_alloc:
+ free_dmem(imx21, etd);
+ nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
+}
+
+/* =========================================== */
+/* Data memory management */
+/* =========================================== */
+
+static int alloc_dmem(struct imx21 *imx21, unsigned int size,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int offset = 0;
+ struct imx21_dmem_area *area;
+ struct imx21_dmem_area *tmp;
+
+ size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
+
+ if (size > DMEM_SIZE) {
+ dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
+ size, DMEM_SIZE);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp, &imx21->dmem_list, list) {
+ if ((size + offset) < offset)
+ goto fail;
+ if ((size + offset) <= tmp->offset)
+ break;
+ offset = tmp->size + tmp->offset;
+ if ((offset + size) > DMEM_SIZE)
+ goto fail;
+ }
+
+ area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
+ if (area == NULL)
+ return -ENOMEM;
+
+ area->ep = ep;
+ area->offset = offset;
+ area->size = size;
+ list_add_tail(&area->list, &tmp->list);
+ debug_dmem_allocated(imx21, size);
+ return offset;
+
+fail:
+ return -ENOMEM;
+}
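+/*
+ * Illustration of alloc_dmem() above: requests are rounded up to a 4-byte
+ * multiple and placed first-fit into the gaps of the offset-sorted
+ * dmem_list.  Example values only.
+ */
+#if 0	/* example only */
+static int example_alloc_dmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+	/* 13 bytes round up to 16: (~13 + 1) & 0x3 == 3, so 13 + 3 == 16 */
+	return alloc_dmem(imx21, 13, ep);	/* e.g. offset 0 on an empty list */
+}
+#endif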
+
+/* Memory now available for a queued ETD - activate it */
+static void activate_queued_etd(struct imx21 *imx21,
+ struct etd_priv *etd, u32 dmem_offset)
+{
+ struct urb_priv *urb_priv = etd->urb->hcpriv;
+ int etd_num = etd - &imx21->etd[0];
+ u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
+ u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
+
+ dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
+ etd_num);
+ etd_writel(imx21, etd_num, 1,
+ ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
+
+ etd->dmem_offset = dmem_offset;
+ urb_priv->active = 1;
+ activate_etd(imx21, etd_num, dir);
+}
+
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
+{
+ struct imx21_dmem_area *area;
+ struct etd_priv *tmp;
+ int found = 0;
+ int offset;
+
+ if (!etd->dmem_size)
+ return;
+ etd->dmem_size = 0;
+
+ offset = etd->dmem_offset;
+ list_for_each_entry(area, &imx21->dmem_list, list) {
+ if (area->offset == offset) {
+ debug_dmem_freed(imx21, area->size);
+ list_del(&area->list);
+ kfree(area);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(imx21->dev,
+ "Trying to free unallocated DMEM %d\n", offset);
+ return;
+ }
+
+ /* Try again to allocate memory for anything we've queued */
+ list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
+ offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
+ if (offset >= 0) {
+ list_del(&etd->queue);
+ activate_queued_etd(imx21, etd, (u32)offset);
+ }
+ }
+}
+
+static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+ struct imx21_dmem_area *area, *tmp;
+
+ list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
+ if (area->ep == ep) {
+ dev_err(imx21->dev,
+ "Active DMEM %d for disabled ep=%p\n",
+ area->offset, ep);
+ list_del(&area->list);
+ kfree(area);
+ }
+ }
+}
+
+
+/* =========================================== */
+/* Endpoint handling */
+/* =========================================== */
+
+/* Endpoint now idle - release its ETD(s) or assign to queued request */
+static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
+{
+ int i;
+
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ int etd_num = ep_priv->etd[i];
+ struct etd_priv *etd;
+ if (etd_num < 0)
+ continue;
+
+ etd = &imx21->etd[etd_num];
+ ep_priv->etd[i] = -1;
+
+ free_dmem(imx21, etd); /* for isoc */
+
+ if (list_empty(&imx21->queue_for_etd)) {
+ free_etd(imx21, etd_num);
+ continue;
+ }
+
+ dev_dbg(imx21->dev,
+ "assigning idle etd %d for queued request\n", etd_num);
+ ep_priv = list_first_entry(&imx21->queue_for_etd,
+ struct ep_priv, queue);
+ list_del(&ep_priv->queue);
+ reset_etd(imx21, etd_num);
+ ep_priv->waiting_etd = 0;
+ ep_priv->etd[i] = etd_num;
+
+ if (list_empty(&ep_priv->ep->urb_list)) {
+ dev_err(imx21->dev, "No urb for queued ep!\n");
+ continue;
+ }
+ schedule_nonisoc_etd(imx21, list_first_entry(
+ &ep_priv->ep->urb_list, struct urb, urb_list));
+ }
+}
+
+static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
+__releases(imx21->lock)
+__acquires(imx21->lock)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct ep_priv *ep_priv = urb->ep->hcpriv;
+ struct urb_priv *urb_priv = urb->hcpriv;
+
+ debug_urb_completed(imx21, urb, status);
+ dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
+
+ kfree(urb_priv->isoc_td);
+ kfree(urb->hcpriv);
+ urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock(&imx21->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ spin_lock(&imx21->lock);
+ if (list_empty(&ep_priv->ep->urb_list))
+ ep_idle(imx21, ep_priv);
+}
+
+static void nonisoc_urb_completed_for_etd(
+ struct imx21 *imx21, struct etd_priv *etd, int status)
+{
+ struct usb_host_endpoint *ep = etd->ep;
+
+ urb_done(imx21->hcd, etd->urb, status);
+ etd->urb = NULL;
+
+ if (!list_empty(&ep->urb_list)) {
+ struct urb *urb = list_first_entry(
+ &ep->urb_list, struct urb, urb_list);
+
+ dev_vdbg(imx21->dev, "next URB %p\n", urb);
+ schedule_nonisoc_etd(imx21, urb);
+ }
+}
+
+
+/* =========================================== */
+/* ISOC Handling ... */
+/* =========================================== */
+
+static void schedule_isoc_etds(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct ep_priv *ep_priv = ep->hcpriv;
+ struct etd_priv *etd;
+ struct urb_priv *urb_priv;
+ struct td *td;
+ int etd_num;
+ int i;
+ int cur_frame;
+ u8 dir;
+
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+too_late:
+ if (list_empty(&ep_priv->td_list))
+ break;
+
+ etd_num = ep_priv->etd[i];
+ if (etd_num < 0)
+ break;
+
+ etd = &imx21->etd[etd_num];
+ if (etd->urb)
+ continue;
+
+ td = list_entry(ep_priv->td_list.next, struct td, list);
+ list_del(&td->list);
+ urb_priv = td->urb->hcpriv;
+
+ cur_frame = imx21_hc_get_frame(hcd);
+ if (frame_after(cur_frame, td->frame)) {
+ dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
+ cur_frame, td->frame);
+ urb_priv->isoc_status = -EXDEV;
+ td->urb->iso_frame_desc[
+ td->isoc_index].actual_length = 0;
+ td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
+ if (--urb_priv->isoc_remaining == 0)
+ urb_done(hcd, td->urb, urb_priv->isoc_status);
+ goto too_late;
+ }
+
+ urb_priv->active = 1;
+ etd->td = td;
+ etd->ep = td->ep;
+ etd->urb = td->urb;
+ etd->len = td->len;
+ etd->dma_handle = td->dma_handle;
+ etd->cpu_buffer = td->cpu_buffer;
+
+ debug_isoc_submitted(imx21, cur_frame, td);
+
+ dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
+ setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
+ etd_writel(imx21, etd_num, 1, etd->dmem_offset);
+ etd_writel(imx21, etd_num, 2,
+ (TD_NOTACCESSED << DW2_COMPCODE) |
+ ((td->frame & 0xFFFF) << DW2_STARTFRM));
+ etd_writel(imx21, etd_num, 3,
+ (TD_NOTACCESSED << DW3_COMPCODE0) |
+ (td->len << DW3_PKTLEN0));
+
+ activate_etd(imx21, etd_num, dir);
+ }
+}
+
+static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int etd_mask = 1 << etd_num;
+ struct etd_priv *etd = imx21->etd + etd_num;
+ struct urb *urb = etd->urb;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct td *td = etd->td;
+ struct usb_host_endpoint *ep = etd->ep;
+ int isoc_index = td->isoc_index;
+ unsigned int pipe = urb->pipe;
+ int dir_in = usb_pipein(pipe);
+ int cc;
+ int bytes_xfrd;
+
+ disactivate_etd(imx21, etd_num);
+
+ cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
+ bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
+
+	/* Input doesn't always fill the buffer, so don't generate an error
+	 * when this happens.
+	 */
+ if (dir_in && (cc == TD_DATAUNDERRUN))
+ cc = TD_CC_NOERROR;
+
+ if (cc == TD_NOTACCESSED)
+ bytes_xfrd = 0;
+
+ debug_isoc_completed(imx21,
+ imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
+ if (cc) {
+ urb_priv->isoc_status = -EXDEV;
+ dev_dbg(imx21->dev,
+ "bad iso cc=0x%X frame=%d sched frame=%d "
+ "cnt=%d len=%d urb=%p etd=%d index=%d\n",
+ cc, imx21_hc_get_frame(hcd), td->frame,
+ bytes_xfrd, td->len, urb, etd_num, isoc_index);
+ }
+
+ if (dir_in) {
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ if (!etd->dma_handle)
+ memcpy_fromio(etd->cpu_buffer,
+ imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+ bytes_xfrd);
+ }
+
+ urb->actual_length += bytes_xfrd;
+ urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
+ urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
+
+ etd->td = NULL;
+ etd->urb = NULL;
+ etd->ep = NULL;
+
+ if (--urb_priv->isoc_remaining == 0)
+ urb_done(hcd, urb, urb_priv->isoc_status);
+
+ schedule_isoc_etds(hcd, ep);
+}
+
+static struct ep_priv *alloc_isoc_ep(
+ struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+ struct ep_priv *ep_priv;
+ int i;
+
+ ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+ if (!ep_priv)
+ return NULL;
+
+ for (i = 0; i < NUM_ISO_ETDS; i++)
+ ep_priv->etd[i] = -1;
+
+ INIT_LIST_HEAD(&ep_priv->td_list);
+ ep_priv->ep = ep;
+ ep->hcpriv = ep_priv;
+ return ep_priv;
+}
+
+static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
+{
+ int i, j;
+ int etd_num;
+
+ /* Allocate the ETDs if required */
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ if (ep_priv->etd[i] < 0) {
+ etd_num = alloc_etd(imx21);
+ if (etd_num < 0)
+ goto alloc_etd_failed;
+
+ ep_priv->etd[i] = etd_num;
+ imx21->etd[etd_num].ep = ep_priv->ep;
+ }
+ }
+ return 0;
+
+alloc_etd_failed:
+ dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
+ for (j = 0; j < i; j++) {
+ free_etd(imx21, ep_priv->etd[j]);
+ ep_priv->etd[j] = -1;
+ }
+ return -ENOMEM;
+}
+
+static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct urb_priv *urb_priv;
+ unsigned long flags;
+ struct ep_priv *ep_priv;
+ struct td *td = NULL;
+ int i;
+ int ret;
+ int cur_frame;
+ u16 maxpacket;
+
+ urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+ if (urb_priv == NULL)
+ return -ENOMEM;
+
+ urb_priv->isoc_td = kzalloc(
+ sizeof(struct td) * urb->number_of_packets, mem_flags);
+ if (urb_priv->isoc_td == NULL) {
+ ret = -ENOMEM;
+ goto alloc_td_failed;
+ }
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ if (ep->hcpriv == NULL) {
+ ep_priv = alloc_isoc_ep(imx21, ep);
+ if (ep_priv == NULL) {
+ ret = -ENOMEM;
+ goto alloc_ep_failed;
+ }
+ } else {
+ ep_priv = ep->hcpriv;
+ }
+
+ ret = alloc_isoc_etds(imx21, ep_priv);
+ if (ret)
+ goto alloc_etd_failed;
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto link_failed;
+
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->error_count = 0;
+ urb->hcpriv = urb_priv;
+ urb_priv->ep = ep;
+
+ /* allocate data memory for largest packets if not already done */
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
+
+ if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
+ /* not sure if this can really occur.... */
+ dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
+ etd->dmem_size, maxpacket);
+ ret = -EMSGSIZE;
+ goto alloc_dmem_failed;
+ }
+
+ if (etd->dmem_size == 0) {
+ etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
+ if (etd->dmem_offset < 0) {
+ dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
+ ret = -EAGAIN;
+ goto alloc_dmem_failed;
+ }
+ etd->dmem_size = maxpacket;
+ }
+ }
+
+ /* calculate frame */
+ cur_frame = imx21_hc_get_frame(hcd);
+ i = 0;
+ if (list_empty(&ep_priv->td_list)) {
+ urb->start_frame = wrap_frame(cur_frame + 5);
+ } else {
+ urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
+ struct td, list)->frame + urb->interval);
+
+ if (frame_after(cur_frame, urb->start_frame)) {
+ dev_dbg(imx21->dev,
+ "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
+ urb->start_frame, cur_frame,
+ (urb->transfer_flags & URB_ISO_ASAP) != 0);
+ i = DIV_ROUND_UP(wrap_frame(
+ cur_frame - urb->start_frame),
+ urb->interval);
+
+ /* Treat underruns as if URB_ISO_ASAP was set */
+ if ((urb->transfer_flags & URB_ISO_ASAP) ||
+ i >= urb->number_of_packets) {
+ urb->start_frame = wrap_frame(urb->start_frame
+ + i * urb->interval);
+ i = 0;
+ }
+ }
+ }
+
+ /* set up transfers */
+ urb_priv->isoc_remaining = urb->number_of_packets - i;
+ td = urb_priv->isoc_td;
+ for (; i < urb->number_of_packets; i++, td++) {
+ unsigned int offset = urb->iso_frame_desc[i].offset;
+ td->ep = ep;
+ td->urb = urb;
+ td->len = urb->iso_frame_desc[i].length;
+ td->isoc_index = i;
+ td->frame = wrap_frame(urb->start_frame + urb->interval * i);
+ td->dma_handle = urb->transfer_dma + offset;
+ td->cpu_buffer = urb->transfer_buffer + offset;
+ list_add_tail(&td->list, &ep_priv->td_list);
+ }
+
+ dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
+ urb->number_of_packets, urb->start_frame, td->frame);
+
+ debug_urb_submitted(imx21, urb);
+ schedule_isoc_etds(hcd, ep);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+alloc_dmem_failed:
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+link_failed:
+alloc_etd_failed:
+alloc_ep_failed:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ kfree(urb_priv->isoc_td);
+
+alloc_td_failed:
+ kfree(urb_priv);
+ return ret;
+}
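+/*
+ * Worked example (illustrative numbers) of the start-frame catch-up above,
+ * with urb->interval == 8: the next scheduled frame is 1000 but the current
+ * frame is already 1021, so i = DIV_ROUND_UP(21, 8) = 3 packets were missed.
+ * With URB_ISO_ASAP (or if every packet was missed) the whole URB slides
+ * forward: start_frame becomes 1000 + 3 * 8 = 1024 and no packets are
+ * dropped.  Otherwise start_frame stays at 1000, the first three packets
+ * are skipped and packet 3 is the first one submitted, at frame 1024.
+ */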
+
+static void dequeue_isoc_urb(struct imx21 *imx21,
+ struct urb *urb, struct ep_priv *ep_priv)
+{
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct td *td, *tmp;
+ int i;
+
+ if (urb_priv->active) {
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ int etd_num = ep_priv->etd[i];
+ if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
+ struct etd_priv *etd = imx21->etd + etd_num;
+
+ reset_etd(imx21, etd_num);
+ free_dmem(imx21, etd);
+ }
+ }
+ }
+
+ list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
+ if (td->urb == urb) {
+ dev_vdbg(imx21->dev, "removing td %p\n", td);
+ list_del(&td->list);
+ }
+ }
+}
+
+/* =========================================== */
+/* NON ISOC Handling ... */
+/* =========================================== */
+
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
+{
+ unsigned int pipe = urb->pipe;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
+ int state = urb_priv->state;
+ int etd_num = ep_priv->etd[0];
+ struct etd_priv *etd;
+ u32 count;
+ u16 etd_buf_size;
+ u16 maxpacket;
+ u8 dir;
+ u8 bufround;
+ u8 datatoggle;
+ u8 interval = 0;
+ u8 relpolpos = 0;
+
+ if (etd_num < 0) {
+ dev_err(imx21->dev, "No valid ETD\n");
+ return;
+ }
+ if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
+ dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
+
+ etd = &imx21->etd[etd_num];
+ maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
+ if (!maxpacket)
+ maxpacket = 8;
+
+ if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
+ if (state == US_CTRL_SETUP) {
+ dir = TD_DIR_SETUP;
+ if (unsuitable_for_dma(urb->setup_dma))
+ usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
+ urb);
+ etd->dma_handle = urb->setup_dma;
+ etd->cpu_buffer = urb->setup_packet;
+ bufround = 0;
+ count = 8;
+ datatoggle = TD_TOGGLE_DATA0;
+ } else { /* US_CTRL_ACK */
+ dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
+ bufround = 0;
+ count = 0;
+ datatoggle = TD_TOGGLE_DATA1;
+ }
+ } else {
+ dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
+ bufround = (dir == TD_DIR_IN) ? 1 : 0;
+ if (unsuitable_for_dma(urb->transfer_dma))
+ usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
+
+ etd->dma_handle = urb->transfer_dma;
+ etd->cpu_buffer = urb->transfer_buffer;
+ if (usb_pipebulk(pipe) && (state == US_BULK0))
+ count = 0;
+ else
+ count = urb->transfer_buffer_length;
+
+ if (usb_pipecontrol(pipe)) {
+ datatoggle = TD_TOGGLE_DATA1;
+ } else {
+ if (usb_gettoggle(
+ urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe)))
+ datatoggle = TD_TOGGLE_DATA1;
+ else
+ datatoggle = TD_TOGGLE_DATA0;
+ }
+ }
+
+ etd->urb = urb;
+ etd->ep = urb_priv->ep;
+ etd->len = count;
+
+ if (usb_pipeint(pipe)) {
+ interval = urb->interval;
+ relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
+ }
+
+ /* Write ETD to device memory */
+ setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
+
+ etd_writel(imx21, etd_num, 2,
+ (u32) interval << DW2_POLINTERV |
+ ((u32) relpolpos << DW2_RELPOLPOS) |
+ ((u32) dir << DW2_DIRPID) |
+ ((u32) bufround << DW2_BUFROUND) |
+ ((u32) datatoggle << DW2_DATATOG) |
+ ((u32) TD_NOTACCESSED << DW2_COMPCODE));
+
+ /* DMA will always transfer buffer size even if TOBYCNT in DWORD3
+ is smaller. Make sure we don't overrun the buffer!
+ */
+ if (count && count < maxpacket)
+ etd_buf_size = count;
+ else
+ etd_buf_size = maxpacket;
+
+ etd_writel(imx21, etd_num, 3,
+ ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
+
+ if (!count)
+ etd->dma_handle = 0;
+
+ /* allocate x and y buffer space at once */
+ etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
+ etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
+ if (etd->dmem_offset < 0) {
+ /* Setup everything we can in HW and update when we get DMEM */
+ etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
+
+ dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
+ debug_urb_queued_for_dmem(imx21, urb);
+ list_add_tail(&etd->queue, &imx21->queue_for_dmem);
+ return;
+ }
+
+ etd_writel(imx21, etd_num, 1,
+ (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
+ (u32) etd->dmem_offset);
+
+ urb_priv->active = 1;
+
+ /* enable the ETD to kick off transfer */
+ dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
+ etd_num, count, dir != TD_DIR_IN ? "out" : "in");
+ activate_etd(imx21, etd_num, dir);
+
+}
+
+static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct etd_priv *etd = &imx21->etd[etd_num];
+ struct urb *urb = etd->urb;
+ u32 etd_mask = 1 << etd_num;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int dir;
+ int cc;
+ u32 bytes_xfrd;
+ int etd_done;
+
+ disactivate_etd(imx21, etd_num);
+
+ dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
+ cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
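+ /* TOTBYECNT in DWORD3 counts down, so transferred = requested - remaining */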
+ bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
+
+ /* save toggle carry */
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe),
+ (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
+
+ if (dir == TD_DIR_IN) {
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+ if (etd->bounce_buffer) {
+ memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
+ dma_unmap_single(imx21->dev,
+ etd->dma_handle, etd->len, DMA_FROM_DEVICE);
+ } else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
+ memcpy_fromio(etd->cpu_buffer,
+ imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+ bytes_xfrd);
+ }
+ }
+
+ kfree(etd->bounce_buffer);
+ etd->bounce_buffer = NULL;
+ free_dmem(imx21, etd);
+
+ urb->error_count = 0;
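+ /* a short transfer (data underrun) only counts as an error if the URB forbids it */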
+ if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (cc == TD_DATAUNDERRUN))
+ cc = TD_CC_NOERROR;
+
+ if (cc != 0)
+ dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
+
+ etd_done = (cc_to_error[cc] != 0); /* stop if error */
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ switch (urb_priv->state) {
+ case US_CTRL_SETUP:
+ if (urb->transfer_buffer_length > 0)
+ urb_priv->state = US_CTRL_DATA;
+ else
+ urb_priv->state = US_CTRL_ACK;
+ break;
+ case US_CTRL_DATA:
+ urb->actual_length += bytes_xfrd;
+ urb_priv->state = US_CTRL_ACK;
+ break;
+ case US_CTRL_ACK:
+ etd_done = 1;
+ break;
+ default:
+ dev_err(imx21->dev,
+ "Invalid pipe state %d\n", urb_priv->state);
+ etd_done = 1;
+ break;
+ }
+ break;
+
+ case PIPE_BULK:
+ urb->actual_length += bytes_xfrd;
+ if ((urb_priv->state == US_BULK)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && urb->transfer_buffer_length > 0
+ && ((urb->transfer_buffer_length %
+ usb_maxpacket(urb->dev, urb->pipe,
+ usb_pipeout(urb->pipe))) == 0)) {
+ /* need a 0-packet */
+ urb_priv->state = US_BULK0;
+ } else {
+ etd_done = 1;
+ }
+ break;
+
+ case PIPE_INTERRUPT:
+ urb->actual_length += bytes_xfrd;
+ etd_done = 1;
+ break;
+ }
+
+ if (etd_done)
+ nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
+ else {
+ dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
+ schedule_nonisoc_etd(imx21, urb);
+ }
+}
+
+
+static struct ep_priv *alloc_ep(void)
+{
+ int i;
+ struct ep_priv *ep_priv;
+
+ ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+ if (!ep_priv)
+ return NULL;
+
+ for (i = 0; i < NUM_ISO_ETDS; ++i)
+ ep_priv->etd[i] = -1;
+
+ return ep_priv;
+}
+
+static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct usb_host_endpoint *ep = urb->ep;
+ struct urb_priv *urb_priv;
+ struct ep_priv *ep_priv;
+ struct etd_priv *etd;
+ int ret;
+ unsigned long flags;
+
+ dev_vdbg(imx21->dev,
+ "enqueue urb=%p ep=%p len=%d "
+ "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
+ urb, ep,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma,
+ urb->setup_packet, urb->setup_dma);
+
+ if (usb_pipeisoc(urb->pipe))
+ return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
+
+ urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+ if (!urb_priv)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ ep_priv = ep->hcpriv;
+ if (ep_priv == NULL) {
+ ep_priv = alloc_ep();
+ if (!ep_priv) {
+ ret = -ENOMEM;
+ goto failed_alloc_ep;
+ }
+ ep->hcpriv = ep_priv;
+ ep_priv->ep = ep;
+ }
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto failed_link;
+
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->error_count = 0;
+ urb->hcpriv = urb_priv;
+ urb_priv->ep = ep;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ urb_priv->state = US_CTRL_SETUP;
+ break;
+ case PIPE_BULK:
+ urb_priv->state = US_BULK;
+ break;
+ }
+
+ debug_urb_submitted(imx21, urb);
+ if (ep_priv->etd[0] < 0) {
+ if (ep_priv->waiting_etd) {
+ dev_dbg(imx21->dev,
+ "no ETD available already queued %p\n",
+ ep_priv);
+ debug_urb_queued_for_etd(imx21, urb);
+ goto out;
+ }
+ ep_priv->etd[0] = alloc_etd(imx21);
+ if (ep_priv->etd[0] < 0) {
+ dev_dbg(imx21->dev,
+ "no ETD available queueing %p\n", ep_priv);
+ debug_urb_queued_for_etd(imx21, urb);
+ list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
+ ep_priv->waiting_etd = 1;
+ goto out;
+ }
+ }
+
+ /* Schedule if no URB already active for this endpoint */
+ etd = &imx21->etd[ep_priv->etd[0]];
+ if (etd->urb == NULL) {
+ DEBUG_LOG_FRAME(imx21, etd, last_req);
+ schedule_nonisoc_etd(imx21, urb);
+ }
+
+out:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+failed_link:
+failed_alloc_ep:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ kfree(urb_priv);
+ return ret;
+}
+
+static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ struct usb_host_endpoint *ep;
+ struct ep_priv *ep_priv;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int ret = -EINVAL;
+
+ dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
+ urb, usb_pipeisoc(urb->pipe), status);
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret)
+ goto fail;
+ ep = urb_priv->ep;
+ ep_priv = ep->hcpriv;
+
+ debug_urb_unlinked(imx21, urb);
+
+ if (usb_pipeisoc(urb->pipe)) {
+ dequeue_isoc_urb(imx21, urb, ep_priv);
+ schedule_isoc_etds(hcd, ep);
+ } else if (urb_priv->active) {
+ int etd_num = ep_priv->etd[0];
+ if (etd_num != -1) {
+ struct etd_priv *etd = &imx21->etd[etd_num];
+
+ disactivate_etd(imx21, etd_num);
+ free_dmem(imx21, etd);
+ etd->urb = NULL;
+ kfree(etd->bounce_buffer);
+ etd->bounce_buffer = NULL;
+ }
+ }
+
+ urb_done(hcd, urb, status);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+fail:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return ret;
+}
+
+/* =========================================== */
+/* Interrupt dispatch */
+/* =========================================== */
+
+static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
+{
+ int etd_num;
+ int enable_sof_int = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
+ u32 etd_mask = 1 << etd_num;
+ u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
+ u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
+ struct etd_priv *etd = &imx21->etd[etd_num];
+
+
+ if (done) {
+ DEBUG_LOG_FRAME(imx21, etd, last_int);
+ } else {
+/*
+ * Kludge warning!
+ *
+ * When multiple transfers are using the bus we sometimes get into a state
+ * where the transfer has completed (the CC field of the ETD is != 0x0F),
+ * the ETD has self disabled but the ETDDONESTAT flag is not set
+ * (and hence no interrupt occurs).
+ * This causes the transfer in question to hang.
+ * The kludge below checks for this condition at each SOF and processes any
+ * blocked ETDs (after an arbitrary 10-frame wait).
+ *
+ * With a single active transfer the usbtest test suite will run for days
+ * without the kludge.
+ * With other bus activity (e.g. mass storage) even just test1 will hang without
+ * the kludge.
+ */
+ u32 dword0;
+ int cc;
+
+ if (etd->active_count && !enabled) /* suspicious... */
+ enable_sof_int = 1;
+
+ if (!sof || enabled || !etd->active_count)
+ continue;
+
+ cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
+ if (cc == TD_NOTACCESSED)
+ continue;
+
+ if (++etd->active_count < 10)
+ continue;
+
+ dword0 = etd_readl(imx21, etd_num, 0);
+ dev_dbg(imx21->dev,
+ "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
+ etd_num, dword0 & 0x7F,
+ (dword0 >> DW0_ENDPNT) & 0x0F,
+ cc);
+
+#ifdef DEBUG
+ dev_dbg(imx21->dev,
+ "frame: act=%d disact=%d"
+ " int=%d req=%d cur=%d\n",
+ etd->activated_frame,
+ etd->disactivated_frame,
+ etd->last_int_frame,
+ etd->last_req_frame,
+ readl(imx21->regs + USBH_FRMNUB));
+ imx21->debug_unblocks++;
+#endif
+ etd->active_count = 0;
+/* End of kludge */
+ }
+
+ if (etd->ep == NULL || etd->urb == NULL) {
+ dev_dbg(imx21->dev,
+ "Interrupt for unexpected etd %d"
+ " ep=%p urb=%p\n",
+ etd_num, etd->ep, etd->urb);
+ disactivate_etd(imx21, etd_num);
+ continue;
+ }
+
+ if (usb_pipeisoc(etd->urb->pipe))
+ isoc_etd_done(hcd, etd_num);
+ else
+ nonisoc_etd_done(hcd, etd_num);
+ }
+
+ /* only enable SOF interrupt if it may be needed for the kludge */
+ if (enable_sof_int)
+ set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+ else
+ clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+static irqreturn_t imx21_irq(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ u32 ints = readl(imx21->regs + USBH_SYSISR);
+
+ if (ints & USBH_SYSIEN_HERRINT)
+ dev_dbg(imx21->dev, "Scheduling error\n");
+
+ if (ints & USBH_SYSIEN_SORINT)
+ dev_dbg(imx21->dev, "Scheduling overrun\n");
+
+ if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
+ process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
+
+ writel(ints, imx21->regs + USBH_SYSISR);
+ return IRQ_HANDLED;
+}
+
+static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ struct ep_priv *ep_priv;
+ int i;
+
+ if (ep == NULL)
+ return;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+ ep_priv = ep->hcpriv;
+ dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
+
+ if (!list_empty(&ep->urb_list))
+ dev_dbg(imx21->dev, "ep's URB list is not empty\n");
+
+ if (ep_priv != NULL) {
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ if (ep_priv->etd[i] > -1)
+ dev_dbg(imx21->dev, "free etd %d for disable\n",
+ ep_priv->etd[i]);
+
+ free_etd(imx21, ep_priv->etd[i]);
+ }
+ kfree(ep_priv);
+ ep->hcpriv = NULL;
+ }
+
+ for (i = 0; i < USB_NUM_ETD; i++) {
+ if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
+ dev_err(imx21->dev,
+ "Active etd %d for disabled ep=%p!\n", i, ep);
+ free_etd(imx21, i);
+ }
+ }
+ free_epdmem(imx21, ep);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Hub handling */
+/* =========================================== */
+
+static int get_hub_descriptor(struct usb_hcd *hcd,
+ struct usb_hub_descriptor *desc)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ desc->bDescriptorType = 0x29; /* HUB descriptor */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
+ & USBH_ROOTHUBA_NDNSTMPRT_MASK;
+ desc->bDescLength = 9;
+ desc->bPwrOn2PwrGood = 0;
+ desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
+ 0x0002 | /* No power switching */
+ 0x0010 | /* No over current protection */
+ 0);
+
+ desc->u.hs.DeviceRemovable[0] = 1 << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
+ return 0;
+}
+
+static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int ports;
+ int changed = 0;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+ ports = readl(imx21->regs + USBH_ROOTHUBA)
+ & USBH_ROOTHUBA_NDNSTMPRT_MASK;
+ if (ports > 7) {
+ dev_err(imx21->dev, "ports %d > 7\n", ports);
+ ports = 7;
+ }
+ for (i = 0; i < ports; i++) {
+ if (readl(imx21->regs + USBH_PORTSTAT(i)) &
+ (USBH_PORTSTAT_CONNECTSC |
+ USBH_PORTSTAT_PRTENBLSC |
+ USBH_PORTSTAT_PRTSTATSC |
+ USBH_PORTSTAT_OVRCURIC |
+ USBH_PORTSTAT_PRTRSTSC)) {
+
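+ /* status change bitmap: bit 0 is the hub itself, ports start at bit 1 */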
+ changed = 1;
+ buf[0] |= 1 << (i + 1);
+ }
+ }
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ if (changed)
+ dev_info(imx21->dev, "Hub status changed\n");
+ return changed;
+}
+
+static int imx21_hc_hub_control(struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int rc = 0;
+ u32 status_write = 0;
+
+ switch (typeReq) {
+ case ClearHubFeature:
+ dev_dbg(imx21->dev, "ClearHubFeature\n");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ dev_dbg(imx21->dev, " OVER_CURRENT\n");
+ break;
+ case C_HUB_LOCAL_POWER:
+ dev_dbg(imx21->dev, " LOCAL_POWER\n");
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+ break;
+
+ case ClearPortFeature:
+ dev_dbg(imx21->dev, "ClearPortFeature\n");
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ dev_dbg(imx21->dev, " ENABLE\n");
+ status_write = USBH_PORTSTAT_CURCONST;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(imx21->dev, " SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTOVRCURI;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(imx21->dev, " POWER\n");
+ status_write = USBH_PORTSTAT_LSDEVCON;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ dev_dbg(imx21->dev, " C_ENABLE\n");
+ status_write = USBH_PORTSTAT_PRTENBLSC;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ dev_dbg(imx21->dev, " C_SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTSTATSC;
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ dev_dbg(imx21->dev, " C_CONNECTION\n");
+ status_write = USBH_PORTSTAT_CONNECTSC;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
+ status_write = USBH_PORTSTAT_OVRCURIC;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ dev_dbg(imx21->dev, " C_RESET\n");
+ status_write = USBH_PORTSTAT_PRTRSTSC;
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ break;
+
+ case GetHubDescriptor:
+ dev_dbg(imx21->dev, "GetHubDescriptor\n");
+ rc = get_hub_descriptor(hcd, (void *)buf);
+ break;
+
+ case GetHubStatus:
+ dev_dbg(imx21->dev, "GetHubStatus\n");
+ *(__le32 *) buf = 0;
+ break;
+
+ case GetPortStatus:
+ dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
+ wIndex, USBH_PORTSTAT(wIndex - 1));
+ *(__le32 *) buf = readl(imx21->regs +
+ USBH_PORTSTAT(wIndex - 1));
+ break;
+
+ case SetHubFeature:
+ dev_dbg(imx21->dev, "SetHubFeature\n");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ dev_dbg(imx21->dev, " OVER_CURRENT\n");
+ break;
+
+ case C_HUB_LOCAL_POWER:
+ dev_dbg(imx21->dev, " LOCAL_POWER\n");
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ break;
+
+ case SetPortFeature:
+ dev_dbg(imx21->dev, "SetPortFeature\n");
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(imx21->dev, " SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTSUSPST;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(imx21->dev, " POWER\n");
+ status_write = USBH_PORTSTAT_PRTPWRST;
+ break;
+ case USB_PORT_FEAT_RESET:
+ dev_dbg(imx21->dev, " RESET\n");
+ status_write = USBH_PORTSTAT_PRTRSTST;
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+ break;
+
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ if (status_write)
+ writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
+ return rc;
+}
+
+/* =========================================== */
+/* Host controller management */
+/* =========================================== */
+
+static int imx21_hc_reset(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ /* Reset the Host controller modules */
+ writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
+ USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
+ imx21->regs + USBOTG_RST_CTRL);
+
+ /* Wait for reset to finish */
+ timeout = jiffies + HZ;
+ while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
+ if (time_after(jiffies, timeout)) {
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ dev_err(imx21->dev, "timeout waiting for reset\n");
+ return -ETIMEDOUT;
+ }
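+ /* drop the lock while sleeping between polls of the reset register */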
+ spin_unlock_irq(&imx21->lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock_irq(&imx21->lock);
+ }
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+}
+
+static int imx21_hc_start(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ int i, j;
+ u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
+ u32 usb_control = 0;
+
+ hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
+ USBOTG_HWMODE_HOSTXCVR_MASK);
+ hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
+ USBOTG_HWMODE_OTGXCVR_MASK);
+
+ if (imx21->pdata->host1_txenoe)
+ usb_control |= USBCTRL_HOST1_TXEN_OE;
+
+ if (!imx21->pdata->host1_xcverless)
+ usb_control |= USBCTRL_HOST1_BYP_TLL;
+
+ if (imx21->pdata->otg_ext_xcvr)
+ usb_control |= USBCTRL_OTC_RCV_RXDP;
+
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
+ imx21->regs + USBOTG_CLK_CTRL);
+ writel(hw_mode, imx21->regs + USBOTG_HWMODE);
+ writel(usb_control, imx21->regs + USBCTRL);
+ writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
+ imx21->regs + USB_MISCCONTROL);
+
+ /* Clear the ETDs */
+ for (i = 0; i < USB_NUM_ETD; i++)
+ for (j = 0; j < 4; j++)
+ etd_writel(imx21, i, j, 0);
+
+ /* Take the HC out of reset */
+ writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
+ imx21->regs + USBH_HOST_CTRL);
+
+ /* Enable ports */
+ if (imx21->pdata->enable_otg_host)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(0));
+
+ if (imx21->pdata->enable_host1)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(1));
+
+ if (imx21->pdata->enable_host2)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(2));
+
+
+ hcd->state = HC_STATE_RUNNING;
+
+ /* Enable host controller interrupts */
+ set_register_bits(imx21, USBH_SYSIEN,
+ USBH_SYSIEN_HERRINT |
+ USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
+ set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void imx21_hc_stop(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ writel(0, imx21->regs + USBH_SYSIEN);
+ clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+ clear_register_bits(imx21, USBOTG_CLK_CTRL,
+ USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Driver glue */
+/* =========================================== */
+
+static struct hc_driver imx21_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "IMX21 USB Host Controller",
+ .hcd_priv_size = sizeof(struct imx21),
+
+ .flags = HCD_USB11,
+ .irq = imx21_irq,
+
+ .reset = imx21_hc_reset,
+ .start = imx21_hc_start,
+ .stop = imx21_hc_stop,
+
+ /* I/O requests */
+ .urb_enqueue = imx21_hc_urb_enqueue,
+ .urb_dequeue = imx21_hc_urb_dequeue,
+ .endpoint_disable = imx21_hc_endpoint_disable,
+
+ /* scheduling support */
+ .get_frame_number = imx21_hc_get_frame,
+
+ /* Root hub support */
+ .hub_status_data = imx21_hc_hub_status_data,
+ .hub_control = imx21_hc_hub_control,
+
+};
+
+static struct mx21_usbh_platform_data default_pdata = {
+ .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+ .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+ .enable_host1 = 1,
+ .enable_host2 = 1,
+ .enable_otg_host = 1,
+
+};
+
+static int imx21_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ remove_debug_files(imx21);
+ usb_remove_hcd(hcd);
+
+ if (res != NULL) {
+ clk_disable_unprepare(imx21->clk);
+ clk_put(imx21->clk);
+ iounmap(imx21->regs);
+ release_mem_region(res->start, resource_size(res));
+ }
+
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+
+static int imx21_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct imx21 *imx21;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&imx21_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (hcd == NULL) {
+ dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
+ dev_name(&pdev->dev));
+ return -ENOMEM;
+ }
+
+ imx21 = hcd_to_imx21(hcd);
+ imx21->hcd = hcd;
+ imx21->dev = &pdev->dev;
+ imx21->pdata = dev_get_platdata(&pdev->dev);
+ if (!imx21->pdata)
+ imx21->pdata = &default_pdata;
+
+ spin_lock_init(&imx21->lock);
+ INIT_LIST_HEAD(&imx21->dmem_list);
+ INIT_LIST_HEAD(&imx21->queue_for_etd);
+ INIT_LIST_HEAD(&imx21->queue_for_dmem);
+ create_debug_files(imx21);
+
+ res = request_mem_region(res->start, resource_size(res), hcd_name);
+ if (!res) {
+ ret = -EBUSY;
+ goto failed_request_mem;
+ }
+
+ imx21->regs = ioremap(res->start, resource_size(res));
+ if (imx21->regs == NULL) {
+ dev_err(imx21->dev, "Cannot map registers\n");
+ ret = -ENOMEM;
+ goto failed_ioremap;
+ }
+
+ /* Enable clock source */
+ imx21->clk = clk_get(imx21->dev, NULL);
+ if (IS_ERR(imx21->clk)) {
+ dev_err(imx21->dev, "no clock found\n");
+ ret = PTR_ERR(imx21->clk);
+ goto failed_clock_get;
+ }
+
+ ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
+ if (ret)
+ goto failed_clock_set;
+ ret = clk_prepare_enable(imx21->clk);
+ if (ret)
+ goto failed_clock_enable;
+
+ dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
+ (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
+
+ ret = usb_add_hcd(hcd, irq, 0);
+ if (ret != 0) {
+ dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
+ goto failed_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ return 0;
+
+failed_add_hcd:
+ clk_disable_unprepare(imx21->clk);
+failed_clock_enable:
+failed_clock_set:
+ clk_put(imx21->clk);
+failed_clock_get:
+ iounmap(imx21->regs);
+failed_ioremap:
+ release_mem_region(res->start, resource_size(res));
+failed_request_mem:
+ remove_debug_files(imx21);
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static struct platform_driver imx21_hcd_driver = {
+ .driver = {
+ .name = hcd_name,
+ },
+ .probe = imx21_probe,
+ .remove = imx21_remove,
+ .suspend = NULL,
+ .resume = NULL,
+};
+
+module_platform_driver(imx21_hcd_driver);
+
+MODULE_DESCRIPTION("i.MX21 USB Host controller");
+MODULE_AUTHOR("Martin Fuzzey");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx21-hcd");
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
new file mode 100644
index 00000000000..05122f8a698
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.h
@@ -0,0 +1,444 @@
+/*
+ * Macros and prototypes for i.MX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_IMX21_HCD_H__
+#define __LINUX_IMX21_HCD_H__
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/platform_data/usb-mx2.h>
+
+#define NUM_ISO_ETDS 2
+#define USB_NUM_ETD 32
+#define DMEM_SIZE 4096
+
+/* Register definitions */
+#define USBOTG_HWMODE 0x00
+#define USBOTG_HWMODE_ANASDBEN (1 << 14)
+#define USBOTG_HWMODE_OTGXCVR_SHIFT 6
+#define USBOTG_HWMODE_OTGXCVR_MASK (3 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RD (0 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RD (2 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RS (1 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RS (3 << 6)
+#define USBOTG_HWMODE_HOSTXCVR_SHIFT 4
+#define USBOTG_HWMODE_HOSTXCVR_MASK (3 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RD (0 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RD (2 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RS (1 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RS (3 << 4)
+#define USBOTG_HWMODE_CRECFG_MASK (3 << 0)
+#define USBOTG_HWMODE_CRECFG_HOST (1 << 0)
+#define USBOTG_HWMODE_CRECFG_FUNC (2 << 0)
+#define USBOTG_HWMODE_CRECFG_HNP (3 << 0)
+
+#define USBOTG_CINT_STAT 0x04
+#define USBOTG_CINT_STEN 0x08
+#define USBOTG_ASHNPINT (1 << 5)
+#define USBOTG_ASFCINT (1 << 4)
+#define USBOTG_ASHCINT (1 << 3)
+#define USBOTG_SHNPINT (1 << 2)
+#define USBOTG_FCINT (1 << 1)
+#define USBOTG_HCINT (1 << 0)
+
+#define USBOTG_CLK_CTRL 0x0c
+#define USBOTG_CLK_CTRL_FUNC (1 << 2)
+#define USBOTG_CLK_CTRL_HST (1 << 1)
+#define USBOTG_CLK_CTRL_MAIN (1 << 0)
+
+#define USBOTG_RST_CTRL 0x10
+#define USBOTG_RST_RSTI2C (1 << 15)
+#define USBOTG_RST_RSTCTRL (1 << 5)
+#define USBOTG_RST_RSTFC (1 << 4)
+#define USBOTG_RST_RSTFSKE (1 << 3)
+#define USBOTG_RST_RSTRH (1 << 2)
+#define USBOTG_RST_RSTHSIE (1 << 1)
+#define USBOTG_RST_RSTHC (1 << 0)
+
+#define USBOTG_FRM_INTVL 0x14
+#define USBOTG_FRM_REMAIN 0x18
+#define USBOTG_HNP_CSR 0x1c
+#define USBOTG_HNP_ISR 0x2c
+#define USBOTG_HNP_IEN 0x30
+
+#define USBOTG_I2C_TXCVR_REG(x) (0x100 + (x))
+#define USBOTG_I2C_XCVR_DEVAD 0x118
+#define USBOTG_I2C_SEQ_OP_REG 0x119
+#define USBOTG_I2C_SEQ_RD_STARTAD 0x11a
+#define USBOTG_I2C_OP_CTRL_REG 0x11b
+#define USBOTG_I2C_SCLK_TO_SCK_HPER 0x11e
+#define USBOTG_I2C_MASTER_INT_REG 0x11f
+
+#define USBH_HOST_CTRL 0x80
+#define USBH_HOST_CTRL_HCRESET (1 << 31)
+#define USBH_HOST_CTRL_SCHDOVR(x) ((x) << 16)
+#define USBH_HOST_CTRL_RMTWUEN (1 << 4)
+#define USBH_HOST_CTRL_HCUSBSTE_RESET (0 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_RESUME (1 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL (2 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND (3 << 2)
+#define USBH_HOST_CTRL_CTLBLKSR_1 (0 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_2 (1 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_3 (2 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_4 (3 << 0)
+
+#define USBH_SYSISR 0x88
+#define USBH_SYSISR_PSCINT (1 << 6)
+#define USBH_SYSISR_FMOFINT (1 << 5)
+#define USBH_SYSISR_HERRINT (1 << 4)
+#define USBH_SYSISR_RESDETINT (1 << 3)
+#define USBH_SYSISR_SOFINT (1 << 2)
+#define USBH_SYSISR_DONEINT (1 << 1)
+#define USBH_SYSISR_SORINT (1 << 0)
+
+#define USBH_SYSIEN 0x8c
+#define USBH_SYSIEN_PSCINT (1 << 6)
+#define USBH_SYSIEN_FMOFINT (1 << 5)
+#define USBH_SYSIEN_HERRINT (1 << 4)
+#define USBH_SYSIEN_RESDETINT (1 << 3)
+#define USBH_SYSIEN_SOFINT (1 << 2)
+#define USBH_SYSIEN_DONEINT (1 << 1)
+#define USBH_SYSIEN_SORINT (1 << 0)
+
+#define USBH_XBUFSTAT 0x98
+#define USBH_YBUFSTAT 0x9c
+#define USBH_XYINTEN 0xa0
+#define USBH_XFILLSTAT 0xa8
+#define USBH_YFILLSTAT 0xac
+#define USBH_ETDENSET 0xc0
+#define USBH_ETDENCLR 0xc4
+#define USBH_IMMEDINT 0xcc
+#define USBH_ETDDONESTAT 0xd0
+#define USBH_ETDDONEEN 0xd4
+#define USBH_FRMNUB 0xe0
+#define USBH_LSTHRESH 0xe4
+
+#define USBH_ROOTHUBA 0xe8
+#define USBH_ROOTHUBA_PWRTOGOOD_MASK (0xff)
+#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT (24)
+#define USBH_ROOTHUBA_NOOVRCURP (1 << 12)
+#define USBH_ROOTHUBA_OVRCURPM (1 << 11)
+#define USBH_ROOTHUBA_DEVTYPE (1 << 10)
+#define USBH_ROOTHUBA_PWRSWTMD (1 << 9)
+#define USBH_ROOTHUBA_NOPWRSWT (1 << 8)
+#define USBH_ROOTHUBA_NDNSTMPRT_MASK (0xff)
+
+#define USBH_ROOTHUBB 0xec
+#define USBH_ROOTHUBB_PRTPWRCM(x) (1 << ((x) + 16))
+#define USBH_ROOTHUBB_DEVREMOVE(x) (1 << (x))
+
+#define USBH_ROOTSTAT 0xf0
+#define USBH_ROOTSTAT_CLRRMTWUE (1 << 31)
+#define USBH_ROOTSTAT_OVRCURCHG (1 << 17)
+#define USBH_ROOTSTAT_DEVCONWUE (1 << 15)
+#define USBH_ROOTSTAT_OVRCURI (1 << 1)
+#define USBH_ROOTSTAT_LOCPWRS (1 << 0)
+
+#define USBH_PORTSTAT(x) (0xf4 + ((x) * 4))
+#define USBH_PORTSTAT_PRTRSTSC (1 << 20)
+#define USBH_PORTSTAT_OVRCURIC (1 << 19)
+#define USBH_PORTSTAT_PRTSTATSC (1 << 18)
+#define USBH_PORTSTAT_PRTENBLSC (1 << 17)
+#define USBH_PORTSTAT_CONNECTSC (1 << 16)
+#define USBH_PORTSTAT_LSDEVCON (1 << 9)
+#define USBH_PORTSTAT_PRTPWRST (1 << 8)
+#define USBH_PORTSTAT_PRTRSTST (1 << 4)
+#define USBH_PORTSTAT_PRTOVRCURI (1 << 3)
+#define USBH_PORTSTAT_PRTSUSPST (1 << 2)
+#define USBH_PORTSTAT_PRTENABST (1 << 1)
+#define USBH_PORTSTAT_CURCONST (1 << 0)
+
+#define USB_DMAREV 0x800
+#define USB_DMAINTSTAT 0x804
+#define USB_DMAINTSTAT_EPERR (1 << 1)
+#define USB_DMAINTSTAT_ETDERR (1 << 0)
+
+#define USB_DMAINTEN 0x808
+#define USB_DMAINTEN_EPERRINTEN (1 << 1)
+#define USB_DMAINTEN_ETDERRINTEN (1 << 0)
+
+#define USB_ETDDMAERSTAT 0x80c
+#define USB_EPDMAERSTAT 0x810
+#define USB_ETDDMAEN 0x820
+#define USB_EPDMAEN 0x824
+#define USB_ETDDMAXTEN 0x828
+#define USB_EPDMAXTEN 0x82c
+#define USB_ETDDMAENXYT 0x830
+#define USB_EPDMAENXYT 0x834
+#define USB_ETDDMABST4EN 0x838
+#define USB_EPDMABST4EN 0x83c
+
+#define USB_MISCCONTROL 0x840
+#define USB_MISCCONTROL_ISOPREVFRM (1 << 3)
+#define USB_MISCCONTROL_SKPRTRY (1 << 2)
+#define USB_MISCCONTROL_ARBMODE (1 << 1)
+#define USB_MISCCONTROL_FILTCC (1 << 0)
+
+#define USB_ETDDMACHANLCLR 0x848
+#define USB_EPDMACHANLCLR 0x84c
+#define USB_ETDSMSA(x) (0x900 + ((x) * 4))
+#define USB_EPSMSA(x) (0x980 + ((x) * 4))
+#define USB_ETDDMABUFPTR(x) (0xa00 + ((x) * 4))
+#define USB_EPDMABUFPTR(x) (0xa80 + ((x) * 4))
+
+#define USB_ETD_DWORD(x, w) (0x200 + ((x) * 16) + ((w) * 4))
+#define DW0_ADDRESS 0
+#define DW0_ENDPNT 7
+#define DW0_DIRECT 11
+#define DW0_SPEED 13
+#define DW0_FORMAT 14
+#define DW0_MAXPKTSIZ 16
+#define DW0_HALTED 27
+#define DW0_TOGCRY 28
+#define DW0_SNDNAK 30
+
+#define DW1_XBUFSRTAD 0
+#define DW1_YBUFSRTAD 16
+
+#define DW2_RTRYDELAY 0
+#define DW2_POLINTERV 0
+#define DW2_STARTFRM 0
+#define DW2_RELPOLPOS 8
+#define DW2_DIRPID 16
+#define DW2_BUFROUND 18
+#define DW2_DELAYINT 19
+#define DW2_DATATOG 22
+#define DW2_ERRORCNT 24
+#define DW2_COMPCODE 28
+
+#define DW3_TOTBYECNT 0
+#define DW3_PKTLEN0 0
+#define DW3_COMPCODE0 12
+#define DW3_PKTLEN1 16
+#define DW3_BUFSIZE 21
+#define DW3_COMPCODE1 28
+
+#define USBCTRL 0x600
+#define USBCTRL_I2C_WU_INT_STAT (1 << 27)
+#define USBCTRL_OTG_WU_INT_STAT (1 << 26)
+#define USBCTRL_HOST_WU_INT_STAT (1 << 25)
+#define USBCTRL_FNT_WU_INT_STAT (1 << 24)
+#define USBCTRL_I2C_WU_INT_EN (1 << 19)
+#define USBCTRL_OTG_WU_INT_EN (1 << 18)
+#define USBCTRL_HOST_WU_INT_EN (1 << 17)
+#define USBCTRL_FNT_WU_INT_EN (1 << 16)
+#define USBCTRL_OTC_RCV_RXDP (1 << 13)
+#define USBCTRL_HOST1_BYP_TLL (1 << 12)
+#define USBCTRL_OTG_BYP_VAL(x) ((x) << 10)
+#define USBCTRL_HOST1_BYP_VAL(x) ((x) << 8)
+#define USBCTRL_OTG_PWR_MASK (1 << 6)
+#define USBCTRL_HOST1_PWR_MASK (1 << 5)
+#define USBCTRL_HOST2_PWR_MASK (1 << 4)
+#define USBCTRL_USB_BYP (1 << 2)
+#define USBCTRL_HOST1_TXEN_OE (1 << 1)
+
+#define USBOTG_DMEM 0x1000
+
+/* Values in TD blocks */
+#define TD_DIR_SETUP 0
+#define TD_DIR_OUT 1
+#define TD_DIR_IN 2
+#define TD_FORMAT_CONTROL 0
+#define TD_FORMAT_ISO 1
+#define TD_FORMAT_BULK 2
+#define TD_FORMAT_INT 3
+#define TD_TOGGLE_CARRY 0
+#define TD_TOGGLE_DATA0 2
+#define TD_TOGGLE_DATA1 3
+
+/* control transfer states */
+#define US_CTRL_SETUP 2
+#define US_CTRL_DATA 1
+#define US_CTRL_ACK 0
+
+/* bulk transfer main state and 0-length packet */
+#define US_BULK 1
+#define US_BULK0 0
+
+/*ETD format description*/
+#define IMX_FMT_CTRL 0x0
+#define IMX_FMT_ISO 0x1
+#define IMX_FMT_BULK 0x2
+#define IMX_FMT_INT 0x3
+
+static char fmt_urb_to_etd[4] = {
+/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO,
+/*PIPE_INTERRUPT*/ IMX_FMT_INT,
+/*PIPE_CONTROL*/ IMX_FMT_CTRL,
+/*PIPE_BULK*/ IMX_FMT_BULK
+};
+
+/* condition (error) CC codes and mapping (OHCI like) */
+
+#define TD_CC_NOERROR 0x00
+#define TD_CC_CRC 0x01
+#define TD_CC_BITSTUFFING 0x02
+#define TD_CC_DATATOGGLEM 0x03
+#define TD_CC_STALL 0x04
+#define TD_DEVNOTRESP 0x05
+#define TD_PIDCHECKFAIL 0x06
+/*#define TD_UNEXPECTEDPID 0x07 - reserved, not active on MX2*/
+#define TD_DATAOVERRUN 0x08
+#define TD_DATAUNDERRUN 0x09
+#define TD_BUFFEROVERRUN 0x0C
+#define TD_BUFFERUNDERRUN 0x0D
+#define TD_SCHEDULEOVERRUN 0x0E
+#define TD_NOTACCESSED 0x0F
+
+static const int cc_to_error[16] = {
+ /* No Error */ 0,
+ /* CRC Error */ -EILSEQ,
+ /* Bit Stuff */ -EPROTO,
+ /* Data Togg */ -EILSEQ,
+ /* Stall */ -EPIPE,
+ /* DevNotResp */ -ETIMEDOUT,
+ /* PIDCheck */ -EPROTO,
+ /* UnExpPID */ -EPROTO,
+ /* DataOver */ -EOVERFLOW,
+ /* DataUnder */ -EREMOTEIO,
+ /* (for hw) */ -EIO,
+ /* (for hw) */ -EIO,
+ /* BufferOver */ -ECOMM,
+ /* BuffUnder */ -ENOSR,
+ /* (for HCD) */ -ENOSPC,
+ /* (for HCD) */ -EALREADY
+};
+
+/* HCD data associated with a usb core URB */
+struct urb_priv {
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+ int active;
+ int state;
+ struct td *isoc_td;
+ int isoc_remaining;
+ int isoc_status;
+};
+
+/* HCD data associated with a usb core endpoint */
+struct ep_priv {
+ struct usb_host_endpoint *ep;
+ struct list_head td_list;
+ struct list_head queue;
+ int etd[NUM_ISO_ETDS];
+ int waiting_etd;
+};
+
+/* isoc packet */
+struct td {
+ struct list_head list;
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+ dma_addr_t dma_handle;
+ void *cpu_buffer;
+ int len;
+ int frame;
+ int isoc_index;
+};
+
+/* HCD data associated with a hardware ETD */
+struct etd_priv {
+ struct usb_host_endpoint *ep;
+ struct urb *urb;
+ struct td *td;
+ struct list_head queue;
+ dma_addr_t dma_handle;
+ void *cpu_buffer;
+ void *bounce_buffer;
+ int alloc;
+ int len;
+ int dmem_size;
+ int dmem_offset;
+ int active_count;
+#ifdef DEBUG
+ int activated_frame;
+ int disactivated_frame;
+ int last_int_frame;
+ int last_req_frame;
+ u32 submitted_dwords[4];
+#endif
+};
+
+/* Hardware data memory info */
+struct imx21_dmem_area {
+ struct usb_host_endpoint *ep;
+ unsigned int offset;
+ unsigned int size;
+ struct list_head list;
+};
+
+#ifdef DEBUG
+struct debug_usage_stats {
+ unsigned int value;
+ unsigned int maximum;
+};
+
+struct debug_stats {
+ unsigned long submitted;
+ unsigned long completed_ok;
+ unsigned long completed_failed;
+ unsigned long unlinked;
+ unsigned long queue_etd;
+ unsigned long queue_dmem;
+};
+
+struct debug_isoc_trace {
+ int schedule_frame;
+ int submit_frame;
+ int request_len;
+ int done_frame;
+ int done_len;
+ int cc;
+ struct td *td;
+};
+#endif
+
+/* HCD data structure */
+struct imx21 {
+ spinlock_t lock;
+ struct device *dev;
+ struct usb_hcd *hcd;
+ struct mx21_usbh_platform_data *pdata;
+ struct list_head dmem_list;
+ struct list_head queue_for_etd; /* eps queued due to etd shortage */
+ struct list_head queue_for_dmem; /* etds queued due to dmem shortage */
+ struct etd_priv etd[USB_NUM_ETD];
+ struct clk *clk;
+ void __iomem *regs;
+#ifdef DEBUG
+ struct dentry *debug_root;
+ struct debug_stats nonisoc_stats;
+ struct debug_stats isoc_stats;
+ struct debug_usage_stats etd_usage;
+ struct debug_usage_stats dmem_usage;
+ struct debug_isoc_trace isoc_trace[20];
+ struct debug_isoc_trace isoc_trace_failed[20];
+ unsigned long debug_unblocks;
+ int isoc_trace_index;
+ int isoc_trace_index_failed;
+#endif
+};
+
+#endif
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 4dda31b2689..240e792c81a 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -60,18 +60,17 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/isp116x.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/byteorder.h>
-#include "../core/hcd.h"
#include "isp116x.h"
#define DRIVER_VERSION "03 Nov 2005"
@@ -611,6 +610,7 @@ static irqreturn_t isp116x_irq(struct usb_hcd *hcd)
/* IRQ's are off, we do no DMA,
perfectly ready to die ... */
hcd->state = HC_STATE_HALT;
+ usb_hc_died(hcd);
ret = IRQ_HANDLED;
goto done;
}
@@ -772,7 +772,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
break;
case PIPE_INTERRUPT:
urb->interval = ep->period;
- ep->length = min((int)ep->maxpacket,
+ ep->length = min_t(u32, ep->maxpacket,
urb->transfer_buffer_length);
/* urb submitted for already existing endpoint */
@@ -950,9 +950,9 @@ static void isp116x_hub_descriptor(struct isp116x *isp116x,
/* Power switching, device type, overcurrent. */
desc->wHubCharacteristics = cpu_to_le16((u16) ((reg >> 8) & 0x1f));
desc->bPwrOn2PwrGood = (u8) ((reg >> 24) & 0xff);
- /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
- desc->bitmap[0] = 0;
- desc->bitmap[1] = ~0;
+ /* ports removable, and legacy PortPwrCtrlMask */
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = ~0;
}
/* Perform reset of a given port.
@@ -1556,9 +1556,7 @@ static int isp116x_remove(struct platform_device *pdev)
return 0;
}
-#define resource_len(r) (((r)->end - (r)->start) + 1)
-
-static int __devinit isp116x_probe(struct platform_device *pdev)
+static int isp116x_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct isp116x *isp116x;
@@ -1569,6 +1567,9 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
int ret = 0;
unsigned long irqflags;
+ if (usb_disabled())
+ return -ENODEV;
+
if (pdev->num_resources < 3) {
ret = -ENODEV;
goto err1;
@@ -1596,7 +1597,7 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
ret = -EBUSY;
goto err1;
}
- addr_reg = ioremap(addr->start, resource_len(addr));
+ addr_reg = ioremap(addr->start, resource_size(addr));
if (addr_reg == NULL) {
ret = -ENOMEM;
goto err2;
@@ -1605,7 +1606,7 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
ret = -EBUSY;
goto err3;
}
- data_reg = ioremap(data->start, resource_len(data));
+ data_reg = ioremap(data->start, resource_size(data));
if (data_reg == NULL) {
ret = -ENOMEM;
goto err4;
@@ -1624,7 +1625,7 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
isp116x->addr_reg = addr_reg;
spin_lock_init(&isp116x->lock);
INIT_LIST_HEAD(&isp116x->async);
- isp116x->board = pdev->dev.platform_data;
+ isp116x->board = dev_get_platdata(&pdev->dev);
if (!isp116x->board) {
ERR("Platform data structure not initialized\n");
@@ -1639,10 +1640,12 @@ static int __devinit isp116x_probe(struct platform_device *pdev)
goto err6;
}
- ret = usb_add_hcd(hcd, irq, irqflags | IRQF_DISABLED);
+ ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
goto err6;
+ device_wakeup_enable(hcd->self.controller);
+
ret = create_debug_file(isp116x);
if (ret) {
ERR("Couldn't create debugfs entry\n");
@@ -1703,27 +1706,9 @@ static struct platform_driver isp116x_driver = {
.suspend = isp116x_suspend,
.resume = isp116x_resume,
.driver = {
- .name = (char *)hcd_name,
+ .name = hcd_name,
.owner = THIS_MODULE,
},
};
-/*-----------------------------------------------------------------*/
-
-static int __init isp116x_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
-
- INFO("driver %s, %s\n", hcd_name, DRIVER_VERSION);
- return platform_driver_register(&isp116x_driver);
-}
-
-module_init(isp116x_init);
-
-static void __exit isp116x_cleanup(void)
-{
- platform_driver_unregister(&isp116x_driver);
-}
-
-module_exit(isp116x_cleanup);
+module_platform_driver(isp116x_driver);
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h
index aa211bafcff..dd34b7a3396 100644
--- a/drivers/usb/host/isp116x.h
+++ b/drivers/usb/host/isp116x.h
@@ -13,7 +13,7 @@
/* Full speed: max # of bytes to transfer for a single urb
at a time must be < 1024 && must be multiple of 64.
- 832 allows transfering 4kiB within 5 frames. */
+ 832 allows transferring 4kiB within 5 frames. */
#define MAX_TRANSFER_SIZE_FULLSPEED 832
/* Low speed: there is no reason to schedule in very big
@@ -325,11 +325,7 @@ struct isp116x_ep {
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-#define DBG(stuff...) printk(KERN_DEBUG "116x: " stuff)
-#else
-#define DBG(stuff...) do{}while(0)
-#endif
+#define DBG(stuff...) pr_debug("116x: " stuff)
#ifdef VERBOSE
# define VDBG DBG
@@ -358,15 +354,8 @@ struct isp116x_ep {
#define isp116x_check_platform_delay(h) 0
#endif
-#if defined(DEBUG)
-#define IRQ_TEST() BUG_ON(!irqs_disabled())
-#else
-#define IRQ_TEST() do{}while(0)
-#endif
-
static inline void isp116x_write_addr(struct isp116x *isp116x, unsigned reg)
{
- IRQ_TEST();
writew(reg & 0xff, isp116x->addr_reg);
isp116x_delay(isp116x, 300);
}
@@ -563,7 +552,7 @@ static void urb_dbg(struct urb *urb, char *msg)
*/
static inline void dump_ptd(struct ptd *ptd)
{
- printk("td: %x %d%c%d %d,%d,%d %x %x%x%x\n",
+ printk(KERN_WARNING "td: %x %d%c%d %d,%d,%d %x %x%x%x\n",
PTD_GET_CC(ptd), PTD_GET_FA(ptd),
PTD_DIR_STR(ptd), PTD_GET_EP(ptd),
PTD_GET_COUNT(ptd), PTD_GET_LEN(ptd), PTD_GET_MPS(ptd),
@@ -576,7 +565,7 @@ static inline void dump_ptd_out_data(struct ptd *ptd, u8 * buf)
int k;
if (PTD_GET_DIR(ptd) != PTD_DIR_IN && PTD_GET_LEN(ptd)) {
- printk("-> ");
+ printk(KERN_WARNING "-> ");
for (k = 0; k < PTD_GET_LEN(ptd); ++k)
printk("%02x ", ((u8 *) buf)[k]);
printk("\n");
@@ -588,13 +577,13 @@ static inline void dump_ptd_in_data(struct ptd *ptd, u8 * buf)
int k;
if (PTD_GET_DIR(ptd) == PTD_DIR_IN && PTD_GET_COUNT(ptd)) {
- printk("<- ");
+ printk(KERN_WARNING "<- ");
for (k = 0; k < PTD_GET_COUNT(ptd); ++k)
printk("%02x ", ((u8 *) buf)[k]);
printk("\n");
}
if (PTD_GET_LAST(ptd))
- printk("-\n");
+ printk(KERN_WARNING "-\n");
}
#else
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
new file mode 100644
index 00000000000..875bcfd3ec1
--- /dev/null
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -0,0 +1,2838 @@
+/*
+ * ISP1362 HCD (Host Controller Driver) for USB.
+ *
+ * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
+ *
+ * Derived from the SL811 HCD, rewritten for ISP116x.
+ * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
+ *
+ * Portions:
+ * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
+ * Copyright (C) 2004 David Brownell
+ */
+
+/*
+ * The ISP1362 chip requires a large delay (300ns and 462ns) between
+ * accesses to the address and data register.
+ * The following timing options exist:
+ *
+ * 1. Configure your memory controller to add such delays if it can (the best)
+ * 2. Implement a platform-specific delay function, possibly
+ * combined with configuring the memory controller; see
+ * include/linux/usb/isp1362.h for more info.
+ * 3. Use ndelay (easiest, poorest).
+ *
+ * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
+ * platform specific section of isp1362.h to select the appropriate variant.
+ *
+ * Also note that according to the Philips "ISP1362 Errata" document
+ * Rev 1.00 from 27 May, data corruption may occur when the #WR signal
+ * is reasserted (even with #CS deasserted) within 132ns after a
+ * write cycle to any controller register. If the hardware doesn't
+ * implement the recommended fix (gating the #WR with #CS) software
+ * must ensure that no further write cycle (not necessarily to the chip!)
+ * is issued by the CPU within this interval.
+ *
+ * For PXA25x this can be ensured by using VLIO with the maximum
+ * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
+ */
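+/*
+ * Purely illustrative sketch of option 3 (USE_NDELAY): pad each register
+ * access with ndelay() using the larger of the two figures quoted above.
+ * The helper name is hypothetical, not part of this driver:
+ *
+ *	static inline void isp1362_access_pause(void)
+ *	{
+ *		ndelay(462);
+ *	}
+ */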
+
+#undef ISP1362_DEBUG
+
+/*
+ * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
+ * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
+ * requests are carried out in separate frames. This will delay any SETUP
+ * packets until the start of the next frame so that this situation is
+ * unlikely to occur (and makes usbtest happy running with a PXA255 target
+ * device).
+ */
+#undef BUGGY_PXA2XX_UDC_USBTEST
+
+#undef PTD_TRACE
+#undef URB_TRACE
+#undef VERBOSE
+#undef REGISTERS
+
+/* This enables a memory test on the ISP1362 chip memory to make sure the
+ * chip access timing is correct.
+ */
+#undef CHIP_BUFFER_TEST
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/isp1362.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/bitmap.h>
+#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+static int dbg_level;
+#ifdef ISP1362_DEBUG
+module_param(dbg_level, int, 0644);
+#else
+module_param(dbg_level, int, 0);
+#endif
+
+#include "../core/usb.h"
+#include "isp1362.h"
+
+
+#define DRIVER_VERSION "2005-04-04"
+#define DRIVER_DESC "ISP1362 USB Host Controller Driver"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+static const char hcd_name[] = "isp1362-hcd";
+
+static void isp1362_hc_stop(struct usb_hcd *hcd);
+static int isp1362_hc_start(struct usb_hcd *hcd);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * When called from the interrupt handler, only isp1362_hcd->irqenb is modified,
+ * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
+ * completion.
+ * We don't need a 'disable' counterpart, since interrupts will be disabled
+ * only by the interrupt handler.
+ */
+static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
+{
+ if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
+ return;
+ if (mask & ~isp1362_hcd->irqenb)
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
+ isp1362_hcd->irqenb |= mask;
+ if (isp1362_hcd->irq_active)
+ return;
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
+ u16 offset)
+{
+ struct isp1362_ep_queue *epq = NULL;
+
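+ /* PTD buffers are laid out back to back in chip memory: ISTL0, ISTL1, INTL, ATL */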
+ if (offset < isp1362_hcd->istl_queue[1].buf_start)
+ epq = &isp1362_hcd->istl_queue[0];
+ else if (offset < isp1362_hcd->intl_queue.buf_start)
+ epq = &isp1362_hcd->istl_queue[1];
+ else if (offset < isp1362_hcd->atl_queue.buf_start)
+ epq = &isp1362_hcd->intl_queue;
+ else if (offset < isp1362_hcd->atl_queue.buf_start +
+ isp1362_hcd->atl_queue.buf_size)
+ epq = &isp1362_hcd->atl_queue;
+
+ if (epq)
+ DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
+ else
+ pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
+
+ return epq;
+}
+
+static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
+{
+ int offset;
+
+ if (index * epq->blk_size > epq->buf_size) {
+ pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
+ epq->buf_size / epq->blk_size);
+ return -EINVAL;
+ }
+ offset = epq->buf_start + index * epq->blk_size;
+ DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
+
+ return offset;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
+ int mps)
+{
+ u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
+
+ xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
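+ /* when the request must be truncated, send only whole max-packet-size chunks */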
+ if (xfer_size < size && xfer_size % mps)
+ xfer_size -= xfer_size % mps;
+
+ return xfer_size;
+}
+
+static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
+ struct isp1362_ep *ep, u16 len)
+{
+ int ptd_offset = -EINVAL;
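+ /* blocks needed for the PTD header plus payload, rounded up to whole blocks */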
+ int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
+ int found;
+
+ BUG_ON(len > epq->buf_size);
+
+ if (!epq->buf_avail)
+ return -ENOMEM;
+
+ if (ep->num_ptds)
+ pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
+ epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
+ BUG_ON(ep->num_ptds != 0);
+
+ found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
+ num_ptds, 0);
+ if (found >= epq->buf_count)
+ return -EOVERFLOW;
+
+ DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
+ num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
+ ptd_offset = get_ptd_offset(epq, found);
+ WARN_ON(ptd_offset < 0);
+ ep->ptd_offset = ptd_offset;
+ ep->num_ptds += num_ptds;
+ epq->buf_avail -= num_ptds;
+ BUG_ON(epq->buf_avail > epq->buf_count);
+ ep->ptd_index = found;
+ bitmap_set(&epq->buf_map, found, num_ptds);
+ DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
+ __func__, epq->name, ep->ptd_index, ep->ptd_offset,
+ epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
+
+ return found;
+}
+
+static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
+{
+ int last = ep->ptd_index + ep->num_ptds;
+
+ if (last > epq->buf_count)
+ pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
+ __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
+ ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
+ epq->buf_map, epq->skip_map);
+ BUG_ON(last > epq->buf_count);
+
+ bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
+ bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
+ epq->buf_avail += ep->num_ptds;
+ epq->ptd_count--;
+
+ BUG_ON(epq->buf_avail > epq->buf_count);
+ BUG_ON(epq->ptd_count > epq->buf_count);
+
+ DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
+ __func__, epq->name,
+ ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
+ DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
+ epq->buf_map, epq->skip_map);
+
+ ep->num_ptds = 0;
+ ep->ptd_offset = -EINVAL;
+ ep->ptd_index = -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ Set up PTDs.
+*/
+static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
+ struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
+ u16 fno)
+{
+ struct ptd *ptd;
+ int toggle;
+ int dir;
+ u16 len;
+ size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
+
+ DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
+
+ ptd = &ep->ptd;
+
+ ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
+
+ switch (ep->nextpid) {
+ case USB_PID_IN:
+ toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
+ dir = PTD_DIR_IN;
+ if (usb_pipecontrol(urb->pipe)) {
+ len = min_t(size_t, ep->maxpacket, buf_len);
+ } else if (usb_pipeisoc(urb->pipe)) {
+ len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
+ ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
+ } else
+ len = max_transfer_size(epq, buf_len, ep->maxpacket);
+ DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
+ (int)buf_len);
+ break;
+ case USB_PID_OUT:
+ toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
+ dir = PTD_DIR_OUT;
+ if (usb_pipecontrol(urb->pipe))
+ len = min_t(size_t, ep->maxpacket, buf_len);
+ else if (usb_pipeisoc(urb->pipe))
+ len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
+ else
+ len = max_transfer_size(epq, buf_len, ep->maxpacket);
+ if (len == 0)
+ pr_info("%s: Sending ZERO packet: %d\n", __func__,
+ urb->transfer_flags & URB_ZERO_PACKET);
+ DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
+ (int)buf_len);
+ break;
+ case USB_PID_SETUP:
+ toggle = 0;
+ dir = PTD_DIR_SETUP;
+ len = sizeof(struct usb_ctrlrequest);
+ DBG(1, "%s: SETUP len %d\n", __func__, len);
+ ep->data = urb->setup_packet;
+ break;
+ case USB_PID_ACK:
+ toggle = 1;
+ len = 0;
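+ /* the status stage runs opposite to the data stage (IN when there was no data) */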
+ dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
+ PTD_DIR_OUT : PTD_DIR_IN;
+ DBG(1, "%s: ACK len %d\n", __func__, len);
+ break;
+ default:
+ toggle = dir = len = 0;
+ pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
+ BUG_ON(1);
+ }
+
+ ep->length = len;
+ if (!len)
+ ep->data = NULL;
+
+ ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
+ ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
+ PTD_EP(ep->epnum);
+ ptd->len = PTD_LEN(len) | PTD_DIR(dir);
+ ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
+
+ if (usb_pipeint(urb->pipe)) {
+ ptd->faddr |= PTD_SF_INT(ep->branch);
+ ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
+ }
+ if (usb_pipeisoc(urb->pipe))
+ ptd->faddr |= PTD_SF_ISO(fno);
+
+ DBG(1, "%s: Finished\n", __func__);
+}
+
+static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
+ struct isp1362_ep_queue *epq)
+{
+ struct ptd *ptd = &ep->ptd;
+ int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
+
+ prefetch(ptd);
+ isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
+ if (len)
+ isp1362_write_buffer(isp1362_hcd, ep->data,
+ ep->ptd_offset + PTD_HEADER_SIZE, len);
+
+ dump_ptd(ptd);
+ dump_ptd_out_data(ptd, ep->data);
+}
+
+static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
+ struct isp1362_ep_queue *epq)
+{
+ struct ptd *ptd = &ep->ptd;
+ int act_len;
+
+ WARN_ON(list_empty(&ep->active));
+ BUG_ON(ep->ptd_offset < 0);
+
+ list_del_init(&ep->active);
+ DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
+
+ prefetchw(ptd);
+ isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
+ dump_ptd(ptd);
+ act_len = PTD_GET_COUNT(ptd);
+ if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
+ return;
+ if (act_len > ep->length)
+ pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
+ ep->ptd_offset, act_len, ep->length);
+ BUG_ON(act_len > ep->length);
+ /* Only transfer the amount of data that has actually been overwritten
+ * in the chip buffer. We don't want any data that doesn't belong to the
+ * transfer to leak out of the chip to the caller's transfer buffer!
+ */
+ prefetchw(ep->data);
+ isp1362_read_buffer(isp1362_hcd, ep->data,
+ ep->ptd_offset + PTD_HEADER_SIZE, act_len);
+ dump_ptd_in_data(ptd, ep->data);
+}
+
+/*
+ * INT PTDs will stay in the chip until data is available.
+ * This function will remove a PTD from the chip when the URB is dequeued.
+ * Must be called with the spinlock held and IRQs disabled
+ */
+static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
+{
+ int index;
+ struct isp1362_ep_queue *epq;
+
+ DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
+ BUG_ON(ep->ptd_offset < 0);
+
+ epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
+ BUG_ON(!epq);
+
+ /* put ep in remove_list for cleanup */
+ WARN_ON(!list_empty(&ep->remove_list));
+ list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
+ /* let SOF interrupt handle the cleanup */
+ isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
+
+ index = ep->ptd_index;
+ if (index < 0)
+ /* ISO queues don't have SKIP registers */
+ return;
+
+ DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
+ index, ep->ptd_offset, epq->skip_map, 1 << index);
+
+ /* prevent further processing of PTD (will be effective after next SOF) */
+ epq->skip_map |= 1 << index;
+ if (epq == &isp1362_hcd->atl_queue) {
+ DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
+ isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
+ if (~epq->skip_map == 0)
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+ } else if (epq == &isp1362_hcd->intl_queue) {
+ DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
+ isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
+ if (~epq->skip_map == 0)
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
+ }
+}
+
+/*
+ Take done or failed requests out of schedule. Give back
+ processed urbs.
+*/
+static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
+ struct urb *urb, int status)
+ __releases(isp1362_hcd->lock)
+ __acquires(isp1362_hcd->lock)
+{
+ urb->hcpriv = NULL;
+ ep->error_count = 0;
+
+ if (usb_pipecontrol(urb->pipe))
+ ep->nextpid = USB_PID_SETUP;
+
+ URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
+ ep->num_req, usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ !usb_pipein(urb->pipe) ? "out" : "in",
+ usb_pipecontrol(urb->pipe) ? "ctrl" :
+ usb_pipeint(urb->pipe) ? "int" :
+ usb_pipebulk(urb->pipe) ? "bulk" :
+ "iso",
+ urb->actual_length, urb->transfer_buffer_length,
+ !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
+ "short_ok" : "", urb->status);
+
+
+ usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
+ spin_unlock(&isp1362_hcd->lock);
+ usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
+ spin_lock(&isp1362_hcd->lock);
+
+ /* take idle endpoints out of the schedule right away */
+ if (!list_empty(&ep->hep->urb_list))
+ return;
+
+ /* async deschedule */
+ if (!list_empty(&ep->schedule)) {
+ list_del_init(&ep->schedule);
+ return;
+ }
+
+ if (ep->interval) {
+ /* periodic deschedule */
+ DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
+ ep, ep->branch, ep->load,
+ isp1362_hcd->load[ep->branch],
+ isp1362_hcd->load[ep->branch] - ep->load);
+ isp1362_hcd->load[ep->branch] -= ep->load;
+ ep->branch = PERIODIC_SIZE;
+ }
+}
+
+/*
+ * Analyze transfer results, handle partial transfers and errors
+ */
+static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
+{
+ struct urb *urb = get_urb(ep);
+ struct usb_device *udev;
+ struct ptd *ptd;
+ int short_ok;
+ u16 len;
+ int urbstat = -EINPROGRESS;
+ u8 cc;
+
+ DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
+
+ udev = urb->dev;
+ ptd = &ep->ptd;
+ cc = PTD_GET_CC(ptd);
+ if (cc == PTD_NOTACCESSED) {
+ pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
+ ep->num_req, ptd);
+ cc = PTD_DEVNOTRESP;
+ }
+
+ short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
+ len = urb->transfer_buffer_length - urb->actual_length;
+
+ /* Data underrun is special. For an allowed underrun we clear the
+ * error and continue as normal. For a forbidden underrun we finish
+ * the DATA stage immediately, while for a control transfer we
+ * proceed with the STATUS stage instead.
+ */
+ if (cc == PTD_DATAUNDERRUN) {
+ if (short_ok) {
+ DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
+ __func__, ep->num_req, short_ok ? "" : "not_",
+ PTD_GET_COUNT(ptd), ep->maxpacket, len);
+ cc = PTD_CC_NOERROR;
+ urbstat = 0;
+ } else {
+ DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
+ __func__, ep->num_req,
+ usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
+ short_ok ? "" : "not_",
+ PTD_GET_COUNT(ptd), ep->maxpacket, len);
+ /* save the data underrun error code for later and
+ * proceed with the status stage
+ */
+ urb->actual_length += PTD_GET_COUNT(ptd);
+ if (usb_pipecontrol(urb->pipe)) {
+ ep->nextpid = USB_PID_ACK;
+ BUG_ON(urb->actual_length > urb->transfer_buffer_length);
+
+ if (urb->status == -EINPROGRESS)
+ urb->status = cc_to_error[PTD_DATAUNDERRUN];
+ } else {
+ usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
+ PTD_GET_TOGGLE(ptd));
+ urbstat = cc_to_error[PTD_DATAUNDERRUN];
+ }
+ goto out;
+ }
+ }
+
+ if (cc != PTD_CC_NOERROR) {
+ if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
+ urbstat = cc_to_error[cc];
+ DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
+ __func__, ep->num_req, ep->nextpid, urbstat, cc,
+ ep->error_count);
+ }
+ goto out;
+ }
+
+ switch (ep->nextpid) {
+ case USB_PID_OUT:
+ if (PTD_GET_COUNT(ptd) != ep->length)
+ pr_err("%s: count=%d len=%d\n", __func__,
+ PTD_GET_COUNT(ptd), ep->length);
+ BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
+ urb->actual_length += ep->length;
+ BUG_ON(urb->actual_length > urb->transfer_buffer_length);
+ usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
+ if (urb->actual_length == urb->transfer_buffer_length) {
+ DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
+ ep->num_req, len, ep->maxpacket, urbstat);
+ if (usb_pipecontrol(urb->pipe)) {
+ DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
+ ep->num_req,
+ usb_pipein(urb->pipe) ? "IN" : "OUT");
+ ep->nextpid = USB_PID_ACK;
+ } else {
+ if (len % ep->maxpacket ||
+ !(urb->transfer_flags & URB_ZERO_PACKET)) {
+ urbstat = 0;
+ DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
+ __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
+ urbstat, len, ep->maxpacket, urb->actual_length);
+ }
+ }
+ }
+ break;
+ case USB_PID_IN:
+ len = PTD_GET_COUNT(ptd);
+ BUG_ON(len > ep->length);
+ urb->actual_length += len;
+ BUG_ON(urb->actual_length > urb->transfer_buffer_length);
+ usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
+ /* if transfer completed or (allowed) data underrun */
+ if ((urb->transfer_buffer_length == urb->actual_length) ||
+ len % ep->maxpacket) {
+ DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
+ ep->num_req, len, ep->maxpacket, urbstat);
+ if (usb_pipecontrol(urb->pipe)) {
+ DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
+ ep->num_req,
+ usb_pipein(urb->pipe) ? "IN" : "OUT");
+ ep->nextpid = USB_PID_ACK;
+ } else {
+ urbstat = 0;
+ DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
+ __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
+ urbstat, len, ep->maxpacket, urb->actual_length);
+ }
+ }
+ break;
+ case USB_PID_SETUP:
+ if (urb->transfer_buffer_length == urb->actual_length) {
+ ep->nextpid = USB_PID_ACK;
+ } else if (usb_pipeout(urb->pipe)) {
+ usb_settoggle(udev, 0, 1, 1);
+ ep->nextpid = USB_PID_OUT;
+ } else {
+ usb_settoggle(udev, 0, 0, 1);
+ ep->nextpid = USB_PID_IN;
+ }
+ break;
+ case USB_PID_ACK:
+ DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
+ urbstat);
+ WARN_ON(urbstat != -EINPROGRESS);
+ urbstat = 0;
+ ep->nextpid = 0;
+ break;
+ default:
+ BUG();
+ }
+
+ out:
+ if (urbstat != -EINPROGRESS) {
+ DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
+ ep, ep->num_req, urb, urbstat);
+ finish_request(isp1362_hcd, ep, urb, urbstat);
+ }
+}
+
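+/*
+ * Process endpoints queued on the remove_list: release their PTD
+ * buffers, give back the first pending URB with -ESHUTDOWN and take
+ * the endpoint off the active list. Runs from the SOF interrupt with
+ * the HCD lock held.
+ */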
+static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
+{
+ struct isp1362_ep *ep;
+ struct isp1362_ep *tmp;
+
+ list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
+ struct isp1362_ep_queue *epq =
+ get_ptd_queue(isp1362_hcd, ep->ptd_offset);
+ int index = ep->ptd_index;
+
+ BUG_ON(epq == NULL);
+ if (index >= 0) {
+ DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
+ BUG_ON(ep->num_ptds == 0);
+ release_ptd_buffers(epq, ep);
+ }
+ if (!list_empty(&ep->hep->urb_list)) {
+ struct urb *urb = get_urb(ep);
+
+ DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
+ ep->num_req, ep);
+ finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
+ }
+ WARN_ON(list_empty(&ep->active));
+ if (!list_empty(&ep->active)) {
+ list_del_init(&ep->active);
+ DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
+ }
+ list_del_init(&ep->remove_list);
+ DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
+ }
+ DBG(1, "%s: Done\n", __func__);
+}
+
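+/*
+ * Enable processing of the ATL queue: program HCATLDTC when only part
+ * of the queue is in use, unmask the ATL done interrupt and mark the
+ * ATL buffer active; with no PTDs queued just re-enable the SOF
+ * interrupt so the schedule is retried later.
+ */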
+static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
+{
+ if (count > 0) {
+ if (count < isp1362_hcd->atl_queue.ptd_count)
+ isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
+ isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
+ isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+ } else
+ isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
+}
+
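+/* Unmask the INTL done interrupt, mark the INTL buffer active and load its skip map */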
+static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+ isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
+ isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
+}
+
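+/* Unmask the done interrupt for the selected ISTL buffer and mark it full */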
+static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
+{
+ isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
+ isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
+ HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
+}
+
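+/*
+ * Prepare a PTD for the given URB, claim buffer space in the queue,
+ * add the endpoint to the active list and write the PTD to the chip.
+ * Returns 0 on success, -ENOMEM if no PTD slot is free or -EOVERFLOW
+ * if the remaining buffer space is too small for this transfer.
+ */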
+static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
+ struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
+{
+ int index = epq->free_ptd;
+
+ prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
+ index = claim_ptd_buffers(epq, ep, ep->length);
+ if (index == -ENOMEM) {
+ DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
+ ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
+ return index;
+ } else if (index == -EOVERFLOW) {
+ DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
+ __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
+ epq->buf_map, epq->skip_map);
+ return index;
+ } else
+ BUG_ON(index < 0);
+ list_add_tail(&ep->active, &epq->active);
+ DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
+ ep, ep->num_req, ep->length, &epq->active);
+ DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
+ ep->ptd_offset, ep, ep->num_req);
+ isp1362_write_ptd(isp1362_hcd, ep, epq);
+ __clear_bit(ep->ptd_index, &epq->skip_map);
+
+ return 0;
+}
+
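+/*
+ * Walk the async schedule and submit a PTD for every endpoint that is
+ * not already active. The schedule is rotated afterwards so that all
+ * endpoints get their turn, and ATL processing is enabled if anything
+ * was queued (or deferred).
+ */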
+static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+ int ptd_count = 0;
+ struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
+ struct isp1362_ep *ep;
+ int defer = 0;
+
+ if (atomic_read(&epq->finishing)) {
+ DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
+ return;
+ }
+
+ list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
+ struct urb *urb = get_urb(ep);
+ int ret;
+
+ if (!list_empty(&ep->active)) {
+ DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
+ continue;
+ }
+
+ DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
+ ep, ep->num_req);
+
+ ret = submit_req(isp1362_hcd, urb, ep, epq);
+ if (ret == -ENOMEM) {
+ defer = 1;
+ break;
+ } else if (ret == -EOVERFLOW) {
+ defer = 1;
+ continue;
+ }
+#ifdef BUGGY_PXA2XX_UDC_USBTEST
+ defer = ep->nextpid == USB_PID_SETUP;
+#endif
+ ptd_count++;
+ }
+
+ /* Avoid starvation of endpoints */
+ if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
+ DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
+ list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
+ }
+ if (ptd_count || defer)
+ enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
+
+ epq->ptd_count += ptd_count;
+ if (epq->ptd_count > epq->stat_maxptds) {
+ epq->stat_maxptds = epq->ptd_count;
+ DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
+ }
+}
+
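+/*
+ * Walk the periodic (interrupt) schedule and submit PTDs for idle
+ * endpoints, then enable INTL processing if any were queued.
+ */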
+static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+ int ptd_count = 0;
+ struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
+ struct isp1362_ep *ep;
+
+ if (atomic_read(&epq->finishing)) {
+ DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
+ return;
+ }
+
+ list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
+ struct urb *urb = get_urb(ep);
+ int ret;
+
+ if (!list_empty(&ep->active)) {
+ DBG(1, "%s: Skipping active %s ep %p\n", __func__,
+ epq->name, ep);
+ continue;
+ }
+
+ DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
+ epq->name, ep, ep->num_req);
+ ret = submit_req(isp1362_hcd, urb, ep, epq);
+ if (ret == -ENOMEM)
+ break;
+ else if (ret == -EOVERFLOW)
+ continue;
+ ptd_count++;
+ }
+
+ if (ptd_count) {
+ static int last_count;
+
+ if (ptd_count != last_count) {
+ DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
+ last_count = ptd_count;
+ }
+ enable_intl_transfers(isp1362_hcd);
+ }
+
+ epq->ptd_count += ptd_count;
+ if (epq->ptd_count > epq->stat_maxptds)
+ epq->stat_maxptds = epq->ptd_count;
+}
+
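+/*
+ * Return the buffer offset of the PTD following @ep's in the ISO
+ * buffer, or -ENOMEM if it would fall outside the buffer.
+ */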
+static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
+{
+ u16 ptd_offset = ep->ptd_offset;
+ int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
+
+ DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
+ ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
+
+ ptd_offset += num_ptds * epq->blk_size;
+ if (ptd_offset < epq->buf_start + epq->buf_size)
+ return ptd_offset;
+ else
+ return -ENOMEM;
+}
+
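+/*
+ * Fill the current ISTL buffer with PTDs for isochronous URBs that are
+ * due in the next frame, retire URBs whose time frame has already
+ * elapsed, and activate the buffer. If the other ISTL buffer is not
+ * marked full, fill it for the following frame as well.
+ */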
+static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+ int ptd_count = 0;
+ int flip = isp1362_hcd->istl_flip;
+ struct isp1362_ep_queue *epq;
+ int ptd_offset;
+ struct isp1362_ep *ep;
+ struct isp1362_ep *tmp;
+ u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+
+ fill2:
+ epq = &isp1362_hcd->istl_queue[flip];
+ if (atomic_read(&epq->finishing)) {
+ DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
+ return;
+ }
+
+ if (!list_empty(&epq->active))
+ return;
+
+ ptd_offset = epq->buf_start;
+ list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
+ struct urb *urb = get_urb(ep);
+ s16 diff = fno - (u16)urb->start_frame;
+
+ DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
+
+ if (diff > urb->number_of_packets) {
+ /* time frame for this URB has elapsed */
+ finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
+ continue;
+ } else if (diff < -1) {
+ /* URB is not due in this frame or the next one.
+ * Comparing with '-1' instead of '0' accounts for double
+ * buffering in the ISP1362, which enables us to queue the PTD
+ * one frame ahead of time.
+ */
+ } else if (diff == -1) {
+ /* submit PTD's that are due in the next frame */
+ prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
+ if (ptd_offset + PTD_HEADER_SIZE + ep->length >
+ epq->buf_start + epq->buf_size) {
+ pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
+ __func__, ep->length);
+ continue;
+ }
+ ep->ptd_offset = ptd_offset;
+ list_add_tail(&ep->active, &epq->active);
+
+ ptd_offset = next_ptd(epq, ep);
+ if (ptd_offset < 0) {
+ pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
+ ep->num_req, epq->name);
+ break;
+ }
+ }
+ }
+ list_for_each_entry(ep, &epq->active, active) {
+ if (epq->active.next == &ep->active)
+ ep->ptd.mps |= PTD_LAST_MSK;
+ isp1362_write_ptd(isp1362_hcd, ep, epq);
+ ptd_count++;
+ }
+
+ if (ptd_count)
+ enable_istl_transfers(isp1362_hcd, flip);
+
+ epq->ptd_count += ptd_count;
+ if (epq->ptd_count > epq->stat_maxptds)
+ epq->stat_maxptds = epq->ptd_count;
+
+ /* check whether the second ISTL buffer may also be filled */
+ if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
+ fno++;
+ ptd_count = 0;
+ flip = 1 - flip;
+ goto fill2;
+ }
+}
+
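+/*
+ * Post-process every PTD marked done in @done_map for the given ATL
+ * or INTL queue: read it back from the chip, release its buffer space
+ * and hand the result to postproc_ep().
+ */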
+static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
+ struct isp1362_ep_queue *epq)
+{
+ struct isp1362_ep *ep;
+ struct isp1362_ep *tmp;
+
+ if (list_empty(&epq->active)) {
+ DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
+ return;
+ }
+
+ DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
+
+ atomic_inc(&epq->finishing);
+ list_for_each_entry_safe(ep, tmp, &epq->active, active) {
+ int index = ep->ptd_index;
+
+ DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
+ index, ep->ptd_offset);
+
+ BUG_ON(index < 0);
+ if (__test_and_clear_bit(index, &done_map)) {
+ isp1362_read_ptd(isp1362_hcd, ep, epq);
+ epq->free_ptd = index;
+ BUG_ON(ep->num_ptds == 0);
+ release_ptd_buffers(epq, ep);
+
+ DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
+ ep, ep->num_req);
+ if (!list_empty(&ep->remove_list)) {
+ list_del_init(&ep->remove_list);
+ DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
+ }
+ DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
+ ep, ep->num_req);
+ postproc_ep(isp1362_hcd, ep);
+ }
+ if (!done_map)
+ break;
+ }
+ if (done_map)
+ pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
+ epq->skip_map);
+ atomic_dec(&epq->finishing);
+}
+
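+/*
+ * Post-process all PTDs of an ISTL buffer: ISO PTDs have no done map,
+ * so every endpoint on the buffer's active list is read back from the
+ * chip and processed.
+ */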
+static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
+{
+ struct isp1362_ep *ep;
+ struct isp1362_ep *tmp;
+
+ if (list_empty(&epq->active)) {
+ DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
+ return;
+ }
+
+ DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
+
+ atomic_inc(&epq->finishing);
+ list_for_each_entry_safe(ep, tmp, &epq->active, active) {
+ DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
+
+ isp1362_read_ptd(isp1362_hcd, ep, epq);
+ DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
+ postproc_ep(isp1362_hcd, ep);
+ }
+ WARN_ON(epq->blk_size != 0);
+ atomic_dec(&epq->finishing);
+}
+
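+/*
+ * Main interrupt handler: services the SOF, ISTL0/1, INTL, ATL, OPR
+ * (OHCI), SUSPEND and CLKRDY interrupts, finishing completed transfers
+ * and restarting the respective schedules.
+ */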
+static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
+{
+ int handled = 0;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ u16 irqstat;
+ u16 svc_mask;
+
+ spin_lock(&isp1362_hcd->lock);
+
+ BUG_ON(isp1362_hcd->irq_active++);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+
+ irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
+ DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
+
+ /* only handle interrupts that are currently enabled */
+ irqstat &= isp1362_hcd->irqenb;
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
+ svc_mask = irqstat;
+
+ if (irqstat & HCuPINT_SOF) {
+ isp1362_hcd->irqenb &= ~HCuPINT_SOF;
+ isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
+ handled = 1;
+ svc_mask &= ~HCuPINT_SOF;
+ DBG(3, "%s: SOF\n", __func__);
+ isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+ if (!list_empty(&isp1362_hcd->remove_list))
+ finish_unlinks(isp1362_hcd);
+ if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
+ if (list_empty(&isp1362_hcd->atl_queue.active)) {
+ start_atl_transfers(isp1362_hcd);
+ } else {
+ isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
+ isp1362_hcd->atl_queue.skip_map);
+ isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+ }
+ }
+ }
+
+ if (irqstat & HCuPINT_ISTL0) {
+ isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
+ handled = 1;
+ svc_mask &= ~HCuPINT_ISTL0;
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
+ DBG(1, "%s: ISTL0\n", __func__);
+ WARN_ON((int)!!isp1362_hcd->istl_flip);
+ WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ HCBUFSTAT_ISTL0_ACTIVE);
+ WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ HCBUFSTAT_ISTL0_DONE));
+ isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
+ }
+
+ if (irqstat & HCuPINT_ISTL1) {
+ isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
+ handled = 1;
+ svc_mask &= ~HCuPINT_ISTL1;
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
+ DBG(1, "%s: ISTL1\n", __func__);
+ WARN_ON(!(int)isp1362_hcd->istl_flip);
+ WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ HCBUFSTAT_ISTL1_ACTIVE);
+ WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ HCBUFSTAT_ISTL1_DONE));
+ isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
+ }
+
+ if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
+ WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
+ (HCuPINT_ISTL0 | HCuPINT_ISTL1));
+ finish_iso_transfers(isp1362_hcd,
+ &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
+ start_iso_transfers(isp1362_hcd);
+ isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
+ }
+
+ if (irqstat & HCuPINT_INTL) {
+ u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
+ u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
+ isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
+
+ DBG(2, "%s: INTL\n", __func__);
+
+ svc_mask &= ~HCuPINT_INTL;
+
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
+ if (~(done_map | skip_map) == 0)
+ /* All PTDs are finished, disable INTL processing entirely */
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
+
+ handled = 1;
+ WARN_ON(!done_map);
+ if (done_map) {
+ DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
+ finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
+ start_intl_transfers(isp1362_hcd);
+ }
+ }
+
+ if (irqstat & HCuPINT_ATL) {
+ u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
+ u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
+ isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
+
+ DBG(2, "%s: ATL\n", __func__);
+
+ svc_mask &= ~HCuPINT_ATL;
+
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
+ if (~(done_map | skip_map) == 0)
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+ if (done_map) {
+ DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
+ finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
+ start_atl_transfers(isp1362_hcd);
+ }
+ handled = 1;
+ }
+
+ if (irqstat & HCuPINT_OPR) {
+ u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
+ isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
+
+ svc_mask &= ~HCuPINT_OPR;
+ DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
+ intstat &= isp1362_hcd->intenb;
+ if (intstat & OHCI_INTR_UE) {
+ pr_err("Unrecoverable error\n");
+ /* FIXME: do a reset or cleanup here, or whatever is appropriate */
+ }
+ if (intstat & OHCI_INTR_RHSC) {
+ isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
+ isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
+ isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
+ }
+ if (intstat & OHCI_INTR_RD) {
+ pr_info("%s: RESUME DETECTED\n", __func__);
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+ usb_hcd_resume_root_hub(hcd);
+ }
+ isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
+ irqstat &= ~HCuPINT_OPR;
+ handled = 1;
+ }
+
+ if (irqstat & HCuPINT_SUSP) {
+ isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
+ handled = 1;
+ svc_mask &= ~HCuPINT_SUSP;
+
+ pr_info("%s: SUSPEND IRQ\n", __func__);
+ }
+
+ if (irqstat & HCuPINT_CLKRDY) {
+ isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
+ handled = 1;
+ isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
+ svc_mask &= ~HCuPINT_CLKRDY;
+ pr_info("%s: CLKRDY IRQ\n", __func__);
+ }
+
+ if (svc_mask)
+ pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
+ isp1362_hcd->irq_active--;
+ spin_unlock(&isp1362_hcd->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
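+/*
+ * Pick the periodic schedule branch with the lowest load that can
+ * still accommodate @load microseconds in every affected frame;
+ * returns the branch index or -ENOSPC.
+ */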
+static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
+{
+ int i, branch = -ENOSPC;
+
+ /* search for the least loaded schedule branch of that interval
+ * which has enough bandwidth left unreserved.
+ */
+ for (i = 0; i < interval; i++) {
+ if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
+ int j;
+
+ for (j = i; j < PERIODIC_SIZE; j += interval) {
+ if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
+ pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
+ load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
+ break;
+ }
+ }
+ if (j < PERIODIC_SIZE)
+ continue;
+ branch = i;
+ }
+ }
+ return branch;
+}
+
+/*
+ * NB! ALL the code above this point runs with isp1362_hcd->lock
+ * held, irqs off.
+ */
+
+/*-------------------------------------------------------------------------*/
+
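+/*
+ * Enqueue an URB: set up the per-endpoint state on first use, link the
+ * endpoint into the appropriate schedule (reserving periodic bandwidth
+ * via balance()) and start the matching transfer type. Isochronous
+ * URBs are currently rejected.
+ */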
+static int isp1362_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ struct usb_device *udev = urb->dev;
+ unsigned int pipe = urb->pipe;
+ int is_out = !usb_pipein(pipe);
+ int type = usb_pipetype(pipe);
+ int epnum = usb_pipeendpoint(pipe);
+ struct usb_host_endpoint *hep = urb->ep;
+ struct isp1362_ep *ep = NULL;
+ unsigned long flags;
+ int retval = 0;
+
+ DBG(3, "%s: urb %p\n", __func__, urb);
+
+ if (type == PIPE_ISOCHRONOUS) {
+ pr_err("Isochronous transfers not supported\n");
+ return -ENOSPC;
+ }
+
+ URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
+ usb_pipedevice(pipe), epnum,
+ is_out ? "out" : "in",
+ usb_pipecontrol(pipe) ? "ctrl" :
+ usb_pipeint(pipe) ? "int" :
+ usb_pipebulk(pipe) ? "bulk" :
+ "iso",
+ urb->transfer_buffer_length,
+ (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
+ !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
+ "short_ok" : "");
+
+ /* avoid all allocations within spinlocks: request or endpoint */
+ if (!hep->hcpriv) {
+ ep = kzalloc(sizeof *ep, mem_flags);
+ if (!ep)
+ return -ENOMEM;
+ }
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ /* don't submit to a dead or disabled port */
+ if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
+ USB_PORT_STAT_ENABLE) ||
+ !HC_IS_RUNNING(hcd->state)) {
+ kfree(ep);
+ retval = -ENODEV;
+ goto fail_not_linked;
+ }
+
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval) {
+ kfree(ep);
+ goto fail_not_linked;
+ }
+
+ if (hep->hcpriv) {
+ ep = hep->hcpriv;
+ } else {
+ INIT_LIST_HEAD(&ep->schedule);
+ INIT_LIST_HEAD(&ep->active);
+ INIT_LIST_HEAD(&ep->remove_list);
+ ep->udev = usb_get_dev(udev);
+ ep->hep = hep;
+ ep->epnum = epnum;
+ ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+ ep->ptd_offset = -EINVAL;
+ ep->ptd_index = -EINVAL;
+ usb_settoggle(udev, epnum, is_out, 0);
+
+ if (type == PIPE_CONTROL)
+ ep->nextpid = USB_PID_SETUP;
+ else if (is_out)
+ ep->nextpid = USB_PID_OUT;
+ else
+ ep->nextpid = USB_PID_IN;
+
+ switch (type) {
+ case PIPE_ISOCHRONOUS:
+ case PIPE_INTERRUPT:
+ if (urb->interval > PERIODIC_SIZE)
+ urb->interval = PERIODIC_SIZE;
+ ep->interval = urb->interval;
+ ep->branch = PERIODIC_SIZE;
+ ep->load = usb_calc_bus_time(udev->speed, !is_out,
+ (type == PIPE_ISOCHRONOUS),
+ usb_maxpacket(udev, pipe, is_out)) / 1000;
+ break;
+ }
+ hep->hcpriv = ep;
+ }
+ ep->num_req = isp1362_hcd->req_serial++;
+
+ /* maybe put endpoint into schedule */
+ switch (type) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ if (list_empty(&ep->schedule)) {
+ DBG(1, "%s: Adding ep %p req %d to async schedule\n",
+ __func__, ep, ep->num_req);
+ list_add_tail(&ep->schedule, &isp1362_hcd->async);
+ }
+ break;
+ case PIPE_ISOCHRONOUS:
+ case PIPE_INTERRUPT:
+ urb->interval = ep->interval;
+
+ /* urb submitted for already existing EP */
+ if (ep->branch < PERIODIC_SIZE)
+ break;
+
+ retval = balance(isp1362_hcd, ep->interval, ep->load);
+ if (retval < 0) {
+ pr_err("%s: balance returned %d\n", __func__, retval);
+ goto fail;
+ }
+ ep->branch = retval;
+ retval = 0;
+ isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+ DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
+ __func__, isp1362_hcd->fmindex, ep->branch,
+ ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
+ ~(PERIODIC_SIZE - 1)) + ep->branch,
+ (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
+
+ if (list_empty(&ep->schedule)) {
+ if (type == PIPE_ISOCHRONOUS) {
+ u16 frame = isp1362_hcd->fmindex;
+
+ frame += max_t(u16, 8, ep->interval);
+ frame &= ~(ep->interval - 1);
+ frame |= ep->branch;
+ if (frame_before(frame, isp1362_hcd->fmindex))
+ frame += ep->interval;
+ urb->start_frame = frame;
+
+ DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
+ list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
+ } else {
+ DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
+ list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
+ }
+ } else
+ DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
+
+ DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
+ ep->load / ep->interval, isp1362_hcd->load[ep->branch],
+ isp1362_hcd->load[ep->branch] + ep->load);
+ isp1362_hcd->load[ep->branch] += ep->load;
+ }
+
+ urb->hcpriv = hep;
+ ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
+
+ switch (type) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ start_atl_transfers(isp1362_hcd);
+ break;
+ case PIPE_INTERRUPT:
+ start_intl_transfers(isp1362_hcd);
+ break;
+ case PIPE_ISOCHRONOUS:
+ start_iso_transfers(isp1362_hcd);
+ break;
+ default:
+ BUG();
+ }
+ fail:
+ if (retval)
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+ fail_not_linked:
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (retval)
+ DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
+ return retval;
+}
+
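+/*
+ * Dequeue an URB: if it is currently being processed by the chip,
+ * queue its PTD for removal and let the SOF interrupt finish the URB;
+ * otherwise give it back immediately.
+ */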
+static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ struct usb_host_endpoint *hep;
+ unsigned long flags;
+ struct isp1362_ep *ep;
+ int retval = 0;
+
+ DBG(3, "%s: urb %p\n", __func__, urb);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval)
+ goto done;
+
+ hep = urb->hcpriv;
+
+ if (!hep) {
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ return -EIDRM;
+ }
+
+ ep = hep->hcpriv;
+ if (ep) {
+ /* In front of queue? */
+ if (ep->hep->urb_list.next == &urb->urb_list) {
+ if (!list_empty(&ep->active)) {
+ DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
+ urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
+ /* disable processing and queue PTD for removal */
+ remove_ptd(isp1362_hcd, ep);
+ urb = NULL;
+ }
+ }
+ if (urb) {
+ DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
+ ep->num_req);
+ finish_request(isp1362_hcd, ep, urb, status);
+ } else
+ DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
+ } else {
+ pr_warning("%s: No EP in URB %p\n", __func__, urb);
+ retval = -EINVAL;
+ }
+done:
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ DBG(3, "%s: exit\n", __func__);
+
+ return retval;
+}
+
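+/*
+ * Free the per-endpoint state once no URBs are queued; an active PTD
+ * is first queued for removal and the interrupt handler is given time
+ * to clean up the active list.
+ */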
+static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+ struct isp1362_ep *ep = hep->hcpriv;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+
+ DBG(1, "%s: ep %p\n", __func__, ep);
+ if (!ep)
+ return;
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ if (!list_empty(&hep->urb_list)) {
+ if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
+ DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
+ ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
+ remove_ptd(isp1362_hcd, ep);
+ pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
+ }
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ /* Wait for interrupt to clear out active list */
+ while (!list_empty(&ep->active))
+ msleep(1);
+
+ DBG(1, "%s: Freeing EP %p\n", __func__, ep);
+
+ usb_put_dev(ep->udev);
+ kfree(ep);
+ hep->hcpriv = NULL;
+}
+
+static int isp1362_get_frame(struct usb_hcd *hcd)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ u32 fmnum;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ return (int)fmnum;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Adapted from ohci-hub.c */
+static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ int ports, i, changed = 0;
+ unsigned long flags;
+
+ if (!HC_IS_RUNNING(hcd->state))
+ return -ESHUTDOWN;
+
+ /* Report no status change now if we are scheduled to be
+ * called later */
+ if (timer_pending(&hcd->rh_timer))
+ return 0;
+
+ ports = isp1362_hcd->rhdesca & RH_A_NDP;
+ BUG_ON(ports > 2);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ /* init status */
+ if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
+ buf[0] = changed = 1;
+ else
+ buf[0] = 0;
+
+ for (i = 0; i < ports; i++) {
+ u32 status = isp1362_hcd->rhport[i];
+
+ if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
+ RH_PS_OCIC | RH_PS_PRSC)) {
+ changed = 1;
+ buf[0] |= 1 << (i + 1);
+ continue;
+ }
+
+ if (!(status & RH_PS_CCS))
+ continue;
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ return changed;
+}
+
+static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
+ struct usb_hub_descriptor *desc)
+{
+ u32 reg = isp1362_hcd->rhdesca;
+
+ DBG(3, "%s: enter\n", __func__);
+
+ desc->bDescriptorType = 0x29;
+ desc->bDescLength = 9;
+ desc->bHubContrCurrent = 0;
+ desc->bNbrPorts = reg & 0x3;
+ /* Power switching, device type, overcurrent. */
+ desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
+ DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
+ desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
+ /* ports removable, and legacy PortPwrCtrlMask */
+ desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
+ desc->u.hs.DeviceRemovable[1] = ~0;
+
+ DBG(3, "%s: exit\n", __func__);
+}
+
+/* Adapted from ohci-hub.c */
+static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ int retval = 0;
+ unsigned long flags;
+ unsigned long t1;
+ int ports = isp1362_hcd->rhdesca & RH_A_NDP;
+ u32 tmp = 0;
+
+ switch (typeReq) {
+ case ClearHubFeature:
+ DBG(0, "ClearHubFeature: ");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ DBG(0, "C_HUB_OVER_CURRENT\n");
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ case C_HUB_LOCAL_POWER:
+ DBG(0, "C_HUB_LOCAL_POWER\n");
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case SetHubFeature:
+ DBG(0, "SetHubFeature: ");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case GetHubDescriptor:
+ DBG(0, "GetHubDescriptor\n");
+ isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
+ break;
+ case GetHubStatus:
+ DBG(0, "GetHubStatus\n");
+ put_unaligned(cpu_to_le32(0), (__le32 *) buf);
+ break;
+ case GetPortStatus:
+#ifndef VERBOSE
+ DBG(0, "GetPortStatus\n");
+#endif
+ if (!wIndex || wIndex > ports)
+ goto error;
+ tmp = isp1362_hcd->rhport[--wIndex];
+ put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
+ break;
+ case ClearPortFeature:
+ DBG(0, "ClearPortFeature: ");
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ DBG(0, "USB_PORT_FEAT_ENABLE\n");
+ tmp = RH_PS_CCS;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
+ tmp = RH_PS_PESC;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+ tmp = RH_PS_POCI;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
+ tmp = RH_PS_PSSC;
+ break;
+ case USB_PORT_FEAT_POWER:
+ DBG(0, "USB_PORT_FEAT_POWER\n");
+ tmp = RH_PS_LSDA;
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
+ tmp = RH_PS_CSC;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ tmp = RH_PS_OCIC;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ DBG(0, "USB_PORT_FEAT_C_RESET\n");
+ tmp = RH_PS_PRSC;
+ break;
+ default:
+ goto error;
+ }
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
+ isp1362_hcd->rhport[wIndex] =
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
+ case SetPortFeature:
+ DBG(0, "SetPortFeature: ");
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
+ isp1362_hcd->rhport[wIndex] =
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
+ case USB_PORT_FEAT_POWER:
+ DBG(0, "USB_PORT_FEAT_POWER\n");
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
+ isp1362_hcd->rhport[wIndex] =
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
+ case USB_PORT_FEAT_RESET:
+ DBG(0, "USB_PORT_FEAT_RESET\n");
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
+ while (time_before(jiffies, t1)) {
+ /* spin until any current reset finishes */
+ for (;;) {
+ tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+ if (!(tmp & RH_PS_PRS))
+ break;
+ udelay(500);
+ }
+ if (!(tmp & RH_PS_CCS))
+ break;
+ /* Reset lasts 10ms (according to the datasheet) */
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ }
+
+ isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
+ HCRHPORT1 + wIndex);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ default:
+ error:
+ /* "protocol stall" on error */
+ DBG(0, "PROTOCOL STALL\n");
+ retval = -EPIPE;
+ }
+
+ return retval;
+}
+
+#ifdef CONFIG_PM
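+/*
+ * Root hub suspend: stop ATL/INTL/ISTL processing, flush transfers that
+ * have already completed and put the OHCI core into the USB SUSPEND
+ * state.
+ */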
+static int isp1362_bus_suspend(struct usb_hcd *hcd)
+{
+ int status = 0;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+
+ if (time_before(jiffies, isp1362_hcd->next_statechange))
+ msleep(5);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
+ switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
+ case OHCI_USB_RESUME:
+ DBG(0, "%s: resume/suspend?\n", __func__);
+ isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
+ isp1362_hcd->hc_control |= OHCI_USB_RESET;
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ /* FALL THROUGH */
+ case OHCI_USB_RESET:
+ status = -EBUSY;
+ pr_warning("%s: needs reinit!\n", __func__);
+ goto done;
+ case OHCI_USB_SUSPEND:
+ pr_warning("%s: already suspended?\n", __func__);
+ goto done;
+ }
+ DBG(0, "%s: suspend root hub\n", __func__);
+
+ /* First stop any processing */
+ hcd->state = HC_STATE_QUIESCING;
+ if (!list_empty(&isp1362_hcd->atl_queue.active) ||
+ !list_empty(&isp1362_hcd->intl_queue.active) ||
+ !list_empty(&isp1362_hcd->istl_queue[0].active) ||
+ !list_empty(&isp1362_hcd->istl_queue[1].active)) {
+ int limit;
+
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
+ isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+ isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
+
+ DBG(0, "%s: stopping schedules ...\n", __func__);
+ limit = 2000;
+ while (limit > 0) {
+ udelay(250);
+ limit -= 250;
+ if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
+ break;
+ }
+ mdelay(7);
+ if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
+ u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
+ finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
+ }
+ if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
+ u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
+ finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
+ }
+ if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
+ finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
+ if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
+ finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
+ }
+ DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
+ isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+ isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
+ isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+
+ /* Suspend hub */
+ isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+
+#if 1
+ isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
+ if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
+ pr_err("%s: controller won't suspend %08x\n", __func__,
+ isp1362_hcd->hc_control);
+ status = -EBUSY;
+ } else
+#endif
+ {
+ /* no resumes until devices finish suspending */
+ isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
+ }
+done:
+ if (status == 0) {
+ hcd->state = HC_STATE_SUSPENDED;
+ DBG(0, "%s: HCD suspended: %08x\n", __func__,
+ isp1362_read_reg32(isp1362_hcd, HCCONTROL));
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ return status;
+}
+
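+/*
+ * Root hub resume: bring the OHCI core from SUSPEND (or RESUME after a
+ * remote wakeup) back to the operational state, resuming suspended
+ * ports on the way; a controller found in RESET is restarted instead.
+ */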
+static int isp1362_bus_resume(struct usb_hcd *hcd)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ u32 port;
+ unsigned long flags;
+ int status = -EINPROGRESS;
+
+ if (time_before(jiffies, isp1362_hcd->next_statechange))
+ msleep(5);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
+ pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
+ if (hcd->state == HC_STATE_RESUMING) {
+ pr_warning("%s: duplicate resume\n", __func__);
+ status = 0;
+ } else
+ switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
+ case OHCI_USB_SUSPEND:
+ DBG(0, "%s: resume root hub\n", __func__);
+ isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
+ isp1362_hcd->hc_control |= OHCI_USB_RESUME;
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ break;
+ case OHCI_USB_RESUME:
+ /* HCFS changes sometime after INTR_RD */
+ DBG(0, "%s: remote wakeup\n", __func__);
+ break;
+ case OHCI_USB_OPER:
+ DBG(0, "%s: odd resume\n", __func__);
+ status = 0;
+ hcd->self.root_hub->dev.power.power_state = PMSG_ON;
+ break;
+ default: /* RESET, we lost power */
+ DBG(0, "%s: root hub hardware reset\n", __func__);
+ status = -EBUSY;
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (status == -EBUSY) {
+ DBG(0, "%s: Restarting HC\n", __func__);
+ isp1362_hc_stop(hcd);
+ return isp1362_hc_start(hcd);
+ }
+ if (status != -EINPROGRESS)
+ return status;
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
+ while (port--) {
+ u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
+
+ /* force global, not selective, resume */
+ if (!(stat & RH_PS_PSS)) {
+ DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
+ continue;
+ }
+ DBG(0, "%s: Resuming RH port %d\n", __func__, port);
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ /* Some controllers (lucent) need extra-long delays */
+ hcd->state = HC_STATE_RESUMING;
+ mdelay(20 /* usb 11.5.1.10 */ + 15);
+
+ isp1362_hcd->hc_control = OHCI_USB_OPER;
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ /* TRSMRCY */
+ msleep(10);
+
+ /* keep it alive for ~5x suspend + resume costs */
+ isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
+
+ hcd->self.root_hub->dev.power.power_state = PMSG_ON;
+ hcd->state = HC_STATE_RUNNING;
+ return 0;
+}
+#else
+#define isp1362_bus_suspend NULL
+#define isp1362_bus_resume NULL
+#endif
+
+/*-------------------------------------------------------------------------*/
+
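+/* debugfs helpers: decode interrupt, OHCI interrupt and control register bits */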
+static void dump_irq(struct seq_file *s, char *label, u16 mask)
+{
+ seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
+ mask & HCuPINT_CLKRDY ? " clkrdy" : "",
+ mask & HCuPINT_SUSP ? " susp" : "",
+ mask & HCuPINT_OPR ? " opr" : "",
+ mask & HCuPINT_EOT ? " eot" : "",
+ mask & HCuPINT_ATL ? " atl" : "",
+ mask & HCuPINT_SOF ? " sof" : "");
+}
+
+static void dump_int(struct seq_file *s, char *label, u32 mask)
+{
+ seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
+ mask & OHCI_INTR_MIE ? " MIE" : "",
+ mask & OHCI_INTR_RHSC ? " rhsc" : "",
+ mask & OHCI_INTR_FNO ? " fno" : "",
+ mask & OHCI_INTR_UE ? " ue" : "",
+ mask & OHCI_INTR_RD ? " rd" : "",
+ mask & OHCI_INTR_SF ? " sof" : "",
+ mask & OHCI_INTR_SO ? " so" : "");
+}
+
+static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
+{
+ seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
+ mask & OHCI_CTRL_RWC ? " rwc" : "",
+ mask & OHCI_CTRL_RWE ? " rwe" : "",
+ ({
+ char *hcfs;
+ switch (mask & OHCI_CTRL_HCFS) {
+ case OHCI_USB_OPER:
+ hcfs = " oper";
+ break;
+ case OHCI_USB_RESET:
+ hcfs = " reset";
+ break;
+ case OHCI_USB_RESUME:
+ hcfs = " resume";
+ break;
+ case OHCI_USB_SUSPEND:
+ hcfs = " suspend";
+ break;
+ default:
+ hcfs = " ?";
+ }
+ hcfs;
+ }));
+}
+
+static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
+{
+ seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
+ isp1362_read_reg32(isp1362_hcd, HCREVISION));
+ seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
+ isp1362_read_reg32(isp1362_hcd, HCCONTROL));
+ seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
+ isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
+ seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
+ isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+ seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
+ isp1362_read_reg32(isp1362_hcd, HCINTENB));
+ seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
+ isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
+ seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
+ isp1362_read_reg32(isp1362_hcd, HCFMREM));
+ seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
+ isp1362_read_reg32(isp1362_hcd, HCFMNUM));
+ seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
+ isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
+ seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
+ isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
+ seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
+ isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
+ seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
+ isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
+ seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
+ seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
+ seq_printf(s, "\n");
+ seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
+ isp1362_read_reg16(isp1362_hcd, HCHWCFG));
+ seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
+ isp1362_read_reg16(isp1362_hcd, HCDMACFG));
+ seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
+ isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
+ seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
+ isp1362_read_reg16(isp1362_hcd, HCuPINT));
+ seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
+ isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
+ seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
+ isp1362_read_reg16(isp1362_hcd, HCCHIPID));
+ seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
+ isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
+ seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
+ isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
+ seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
+ isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
+#if 0
+ seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
+ isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
+#endif
+ seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
+ isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
+ seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
+ isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
+ seq_printf(s, "\n");
+ seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
+ isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
+ seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
+ isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
+ seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
+ isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
+ seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
+ isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
+ seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
+ isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
+ seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
+ isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
+ seq_printf(s, "\n");
+ seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
+ isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
+ seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
+ isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
+#if 0
+ seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
+ isp1362_read_reg32(isp1362_hcd, HCATLDONE));
+#endif
+ seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
+ isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
+ seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
+ isp1362_read_reg32(isp1362_hcd, HCATLLAST));
+ seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
+ isp1362_read_reg16(isp1362_hcd, HCATLCURR));
+ seq_printf(s, "\n");
+ seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
+ isp1362_read_reg16(isp1362_hcd, HCATLDTC));
+ seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
+ isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
+}
+
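+/*
+ * debugfs "isp1362" file: dump buffer usage statistics, the chip
+ * register set and the current async, periodic and isochronous
+ * schedules.
+ */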
+static int isp1362_show(struct seq_file *s, void *unused)
+{
+ struct isp1362_hcd *isp1362_hcd = s->private;
+ struct isp1362_ep *ep;
+ int i;
+
+ seq_printf(s, "%s\n%s version %s\n",
+ isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
+
+ /* collect statistics to help estimate potential win for
+ * DMA engines that care about alignment (PXA)
+ */
+ seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
+ isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
+ isp1362_hcd->stat2, isp1362_hcd->stat1);
+ seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
+ seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
+ seq_printf(s, "max # ptds in ISTL fifo: %d\n",
+ max(isp1362_hcd->istl_queue[0].stat_maxptds,
+ isp1362_hcd->istl_queue[1].stat_maxptds));
+
+ /* FIXME: don't show the following in suspended state */
+ spin_lock_irq(&isp1362_hcd->lock);
+
+ dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
+ dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
+ dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
+ dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+ dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
+
+ for (i = 0; i < NUM_ISP1362_IRQS; i++)
+ if (isp1362_hcd->irq_stat[i])
+ seq_printf(s, "%-15s: %d\n",
+ ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
+
+ dump_regs(s, isp1362_hcd);
+ list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
+ struct urb *urb;
+
+ seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
+ ({
+ char *s;
+ switch (ep->nextpid) {
+ case USB_PID_IN:
+ s = "in";
+ break;
+ case USB_PID_OUT:
+ s = "out";
+ break;
+ case USB_PID_SETUP:
+ s = "setup";
+ break;
+ case USB_PID_ACK:
+ s = "status";
+ break;
+ default:
+ s = "?";
+ break;
+ }
+ s;}), ep->maxpacket);
+ list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
+ seq_printf(s, " urb%p, %d/%d\n", urb,
+ urb->actual_length,
+ urb->transfer_buffer_length);
+ }
+ }
+ if (!list_empty(&isp1362_hcd->async))
+ seq_printf(s, "\n");
+ dump_ptd_queue(&isp1362_hcd->atl_queue);
+
+ seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
+
+ list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
+ seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
+ isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
+
+ seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
+ ep->interval, ep,
+ (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
+ ep->udev->devnum, ep->epnum,
+ (ep->epnum == 0) ? "" :
+ ((ep->nextpid == USB_PID_IN) ?
+ "in" : "out"), ep->maxpacket);
+ }
+ dump_ptd_queue(&isp1362_hcd->intl_queue);
+
+ seq_printf(s, "ISO:\n");
+
+ list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
+ seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
+ ep->interval, ep,
+ (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
+ ep->udev->devnum, ep->epnum,
+ (ep->epnum == 0) ? "" :
+ ((ep->nextpid == USB_PID_IN) ?
+ "in" : "out"), ep->maxpacket);
+ }
+
+ spin_unlock_irq(&isp1362_hcd->lock);
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static int isp1362_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, isp1362_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+ .open = isp1362_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* expect just one isp1362_hcd per system */
+static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
+{
+ isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
+ usb_debug_root,
+ isp1362_hcd, &debug_ops);
+}
+
+static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
+{
+ debugfs_remove(isp1362_hcd->debug_file);
+}
+
+/*-------------------------------------------------------------------------*/
+
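+/*
+ * Issue a software reset via the HCSWRES magic value and the OHCI HCR
+ * bit, waiting up to ~20 ms for the controller to clear HCR again.
+ */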
+static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
+{
+ int tmp = 20;
+
+ isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
+ isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
+ while (--tmp) {
+ mdelay(1);
+ if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
+ break;
+ }
+ if (!tmp)
+ pr_err("Software reset timeout\n");
+}
+
+static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ __isp1362_sw_reset(isp1362_hcd);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+}
+
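+/*
+ * Partition the chip's internal buffer memory into the two ISTL
+ * buffers, the INTL region and the ATL region, and program the
+ * corresponding buffer size, block size, skip and last-PTD registers.
+ */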
+static int isp1362_mem_config(struct usb_hcd *hcd)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+ u32 total;
+ u16 istl_size = ISP1362_ISTL_BUFSIZE;
+ u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
+ u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
+ u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
+ u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
+ u16 atl_size;
+ int i;
+
+ WARN_ON(istl_size & 3);
+ WARN_ON(atl_blksize & 3);
+ WARN_ON(intl_blksize & 3);
+ WARN_ON(atl_blksize < PTD_HEADER_SIZE);
+ WARN_ON(intl_blksize < PTD_HEADER_SIZE);
+
+ BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
+ if (atl_buffers > 32)
+ atl_buffers = 32;
+ atl_size = atl_buffers * atl_blksize;
+ total = atl_size + intl_size + istl_size;
+ dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
+ dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
+ istl_size / 2, istl_size, 0, istl_size / 2);
+ dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
+ ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
+ intl_size, istl_size);
+ dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
+ atl_buffers, atl_blksize - PTD_HEADER_SIZE,
+ atl_size, istl_size + intl_size);
+ dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
+ ISP1362_BUF_SIZE - total);
+
+ if (total > ISP1362_BUF_SIZE) {
+ dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
+ __func__, total, ISP1362_BUF_SIZE);
+ return -ENOMEM;
+ }
+
+ total = istl_size + intl_size + atl_size;
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ for (i = 0; i < 2; i++) {
+ isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
+ isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
+ isp1362_hcd->istl_queue[i].blk_size = 4;
+ INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
+ snprintf(isp1362_hcd->istl_queue[i].name,
+ sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
+ DBG(3, "%s: %5s buf $%04x %d\n", __func__,
+ isp1362_hcd->istl_queue[i].name,
+ isp1362_hcd->istl_queue[i].buf_start,
+ isp1362_hcd->istl_queue[i].buf_size);
+ }
+ isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
+
+ isp1362_hcd->intl_queue.buf_start = istl_size;
+ isp1362_hcd->intl_queue.buf_size = intl_size;
+ isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
+ isp1362_hcd->intl_queue.blk_size = intl_blksize;
+ isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
+ isp1362_hcd->intl_queue.skip_map = ~0;
+ INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
+
+ isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
+ isp1362_hcd->intl_queue.buf_size);
+ isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
+ isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
+ isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
+ 1 << (ISP1362_INTL_BUFFERS - 1));
+
+ isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
+ isp1362_hcd->atl_queue.buf_size = atl_size;
+ isp1362_hcd->atl_queue.buf_count = atl_buffers;
+ isp1362_hcd->atl_queue.blk_size = atl_blksize;
+ isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
+ isp1362_hcd->atl_queue.skip_map = ~0;
+ INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
+
+ isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
+ isp1362_hcd->atl_queue.buf_size);
+ isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
+ isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
+ isp1362_write_reg32(isp1362_hcd, HCATLLAST,
+ 1 << (atl_buffers - 1));
+
+ snprintf(isp1362_hcd->atl_queue.name,
+ sizeof(isp1362_hcd->atl_queue.name), "ATL");
+ snprintf(isp1362_hcd->intl_queue.name,
+ sizeof(isp1362_hcd->intl_queue.name), "INTL");
+ DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
+ isp1362_hcd->intl_queue.name,
+ isp1362_hcd->intl_queue.buf_start,
+ ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
+ isp1362_hcd->intl_queue.buf_size);
+ DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
+ isp1362_hcd->atl_queue.name,
+ isp1362_hcd->atl_queue.buf_start,
+ atl_buffers, isp1362_hcd->atl_queue.blk_size,
+ isp1362_hcd->atl_queue.buf_size);
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ return 0;
+}
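With the default sizes from isp1362.h (ISP1362_BUF_SIZE 4096, a 512-byte ISTL area, 16 INTL blocks of 64+8 and ATL blocks of 64+8), the arithmetic above caps the ATL pool at 32 buffers because the skip/last maps are 32-bit registers. A minimal stand-alone sketch of that calculation, assuming PTD_HEADER_SIZE is 8 bytes (the packed struct ptd defined in isp1362.h below); this is not driver code:

    #include <stdio.h>

    int main(void)
    {
    	unsigned buf_size = 4096, istl = 512;
    	unsigned intl = 16 * (64 + 8);		/* 1152 */
    	unsigned atl_blk = 64 + 8;
    	unsigned atl_buffers = (buf_size - istl - intl) / atl_blk;	/* 33 */

    	if (atl_buffers > 32)			/* skip/last maps are 32 bits wide */
    		atl_buffers = 32;
    	printf("ATL %u * %u = %u, free %u\n", atl_buffers, atl_blk,
    	       atl_buffers * atl_blk,
    	       buf_size - istl - intl - atl_buffers * atl_blk);	/* 2304, 128 */
    	return 0;
    }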
+
+static int isp1362_hc_reset(struct usb_hcd *hcd)
+{
+ int ret = 0;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long t;
+ unsigned long timeout = 100;
+ unsigned long flags;
+ int clkrdy = 0;
+
+ pr_debug("%s:\n", __func__);
+
+ if (isp1362_hcd->board && isp1362_hcd->board->reset) {
+ isp1362_hcd->board->reset(hcd->self.controller, 1);
+ msleep(20);
+ if (isp1362_hcd->board->clock)
+ isp1362_hcd->board->clock(hcd->self.controller, 1);
+ isp1362_hcd->board->reset(hcd->self.controller, 0);
+ } else
+ isp1362_sw_reset(isp1362_hcd);
+
+ /* chip has been reset. First we need to see a clock */
+ t = jiffies + msecs_to_jiffies(timeout);
+ while (!clkrdy && time_before_eq(jiffies, t)) {
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (!clkrdy)
+ msleep(4);
+ }
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (!clkrdy) {
+ pr_err("Clock not ready after %lums\n", timeout);
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+static void isp1362_hc_stop(struct usb_hcd *hcd)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+ u32 tmp;
+
+ pr_debug("%s:\n", __func__);
+
+ del_timer_sync(&hcd->rh_timer);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+
+ /* Switch off power for all ports */
+ tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
+ tmp &= ~(RH_A_NPS | RH_A_PSM);
+ isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
+
+ /* Reset the chip */
+ if (isp1362_hcd->board && isp1362_hcd->board->reset)
+ isp1362_hcd->board->reset(hcd->self.controller, 1);
+ else
+ __isp1362_sw_reset(isp1362_hcd);
+
+ if (isp1362_hcd->board && isp1362_hcd->board->clock)
+ isp1362_hcd->board->clock(hcd->self.controller, 0);
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+}
+
+#ifdef CHIP_BUFFER_TEST
+static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
+{
+ int ret = 0;
+ u16 *ref;
+ unsigned long flags;
+
+ ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
+ if (ref) {
+ int offset;
+ u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
+
+ for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
+ ref[offset] = ~offset;
+ tst[offset] = offset;
+ }
+
+ for (offset = 0; offset < 4; offset++) {
+ int j;
+
+ for (j = 0; j < 8; j++) {
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
+ isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ if (memcmp(ref, tst, j)) {
+ ret = -ENODEV;
+ pr_err("%s: memory check with %d byte offset %d failed\n",
+ __func__, j, offset);
+ dump_data((u8 *)ref + offset, j);
+ dump_data((u8 *)tst + offset, j);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
+ isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
+ ret = -ENODEV;
+ pr_err("%s: memory check failed\n", __func__);
+ dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
+ }
+
+ for (offset = 0; offset < 256; offset++) {
+ int test_size = 0;
+
+ yield();
+
+ memset(tst, 0, ISP1362_BUF_SIZE);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
+ isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
+ ISP1362_BUF_SIZE / 2)) {
+ pr_err("%s: Failed to clear buffer\n", __func__);
+ dump_data((u8 *)tst, ISP1362_BUF_SIZE);
+ break;
+ }
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
+ isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
+ offset * 2 + PTD_HEADER_SIZE, test_size);
+ isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
+ PTD_HEADER_SIZE + test_size);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
+ dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
+ dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
+ PTD_HEADER_SIZE + test_size);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
+ ret = -ENODEV;
+ pr_err("%s: memory check with offset %02x failed\n",
+ __func__, offset);
+ break;
+ }
+ pr_warning("%s: memory check with offset %02x ok after second read\n",
+ __func__, offset);
+ }
+ }
+ kfree(ref);
+ }
+ return ret;
+}
+#endif
+
+static int isp1362_hc_start(struct usb_hcd *hcd)
+{
+ int ret;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ struct isp1362_platform_data *board = isp1362_hcd->board;
+ u16 hwcfg;
+ u16 chipid;
+ unsigned long flags;
+
+ pr_debug("%s:\n", __func__);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
+ pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
+ return -ENODEV;
+ }
+
+#ifdef CHIP_BUFFER_TEST
+ ret = isp1362_chip_test(isp1362_hcd);
+ if (ret)
+ return -ENODEV;
+#endif
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ /* clear interrupt status and disable all interrupt sources */
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+
+ /* HW conf */
+ hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
+ if (board->sel15Kres)
+ hwcfg |= HCHWCFG_PULLDOWN_DS2 |
+ ((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
+ if (board->clknotstop)
+ hwcfg |= HCHWCFG_CLKNOTSTOP;
+ if (board->oc_enable)
+ hwcfg |= HCHWCFG_ANALOG_OC;
+ if (board->int_act_high)
+ hwcfg |= HCHWCFG_INT_POL;
+ if (board->int_edge_triggered)
+ hwcfg |= HCHWCFG_INT_TRIGGER;
+ if (board->dreq_act_high)
+ hwcfg |= HCHWCFG_DREQ_POL;
+ if (board->dack_act_high)
+ hwcfg |= HCHWCFG_DACK_POL;
+ isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
+ isp1362_show_reg(isp1362_hcd, HCHWCFG);
+ isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ ret = isp1362_mem_config(hcd);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ /* Root hub conf */
+ isp1362_hcd->rhdesca = 0;
+ if (board->no_power_switching)
+ isp1362_hcd->rhdesca |= RH_A_NPS;
+ if (board->power_switching_mode)
+ isp1362_hcd->rhdesca |= RH_A_PSM;
+ if (board->potpg)
+ isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
+ else
+ isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
+
+ isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
+ isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
+ isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
+
+ isp1362_hcd->rhdescb = RH_B_PPCM;
+ isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
+ isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
+
+ isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
+ isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
+ isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ isp1362_hcd->hc_control = OHCI_USB_OPER;
+ hcd->state = HC_STATE_RUNNING;
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ /* Set up interrupts */
+ isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
+ isp1362_hcd->intenb |= OHCI_INTR_RD;
+ isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
+ isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
+
+ /* Go operational */
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ /* enable global power */
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct hc_driver isp1362_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "ISP1362 Host Controller",
+ .hcd_priv_size = sizeof(struct isp1362_hcd),
+
+ .irq = isp1362_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ .reset = isp1362_hc_reset,
+ .start = isp1362_hc_start,
+ .stop = isp1362_hc_stop,
+
+ .urb_enqueue = isp1362_urb_enqueue,
+ .urb_dequeue = isp1362_urb_dequeue,
+ .endpoint_disable = isp1362_endpoint_disable,
+
+ .get_frame_number = isp1362_get_frame,
+
+ .hub_status_data = isp1362_hub_status_data,
+ .hub_control = isp1362_hub_control,
+ .bus_suspend = isp1362_bus_suspend,
+ .bus_resume = isp1362_bus_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int isp1362_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ struct resource *res;
+
+ remove_debug_file(isp1362_hcd);
+ DBG(0, "%s: Removing HCD\n", __func__);
+ usb_remove_hcd(hcd);
+
+ DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
+ isp1362_hcd->data_reg);
+ iounmap(isp1362_hcd->data_reg);
+
+ DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
+ isp1362_hcd->addr_reg);
+ iounmap(isp1362_hcd->addr_reg);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res) {
+		DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
+		release_mem_region(res->start, resource_size(res));
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res) {
+		DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
+		release_mem_region(res->start, resource_size(res));
+	}
+
+ DBG(0, "%s: put_hcd\n", __func__);
+ usb_put_hcd(hcd);
+ DBG(0, "%s: Done\n", __func__);
+
+ return 0;
+}
+
+static int isp1362_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct isp1362_hcd *isp1362_hcd;
+ struct resource *addr, *data;
+ void __iomem *addr_reg;
+ void __iomem *data_reg;
+ int irq;
+ int retval = 0;
+ struct resource *irq_res;
+ unsigned int irq_flags = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /* basic sanity checks first. board-specific init logic should
+	 * have initialized these three resources and probably board
+ * specific platform_data. we don't probe for IRQs, and do only
+ * minimal sanity checking.
+ */
+ if (pdev->num_resources < 3) {
+ retval = -ENODEV;
+ goto err1;
+ }
+
+ data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!addr || !data || !irq_res) {
+ retval = -ENODEV;
+ goto err1;
+ }
+ irq = irq_res->start;
+
+ if (pdev->dev.dma_mask) {
+ DBG(1, "won't do DMA");
+ retval = -ENODEV;
+ goto err1;
+ }
+
+ if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
+ retval = -EBUSY;
+ goto err1;
+ }
+ addr_reg = ioremap(addr->start, resource_size(addr));
+ if (addr_reg == NULL) {
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+ if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
+ retval = -EBUSY;
+ goto err3;
+ }
+ data_reg = ioremap(data->start, resource_size(data));
+ if (data_reg == NULL) {
+ retval = -ENOMEM;
+ goto err4;
+ }
+
+ /* allocate and initialize hcd */
+ hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto err5;
+ }
+ hcd->rsrc_start = data->start;
+ isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ isp1362_hcd->data_reg = data_reg;
+ isp1362_hcd->addr_reg = addr_reg;
+
+ isp1362_hcd->next_statechange = jiffies;
+ spin_lock_init(&isp1362_hcd->lock);
+ INIT_LIST_HEAD(&isp1362_hcd->async);
+ INIT_LIST_HEAD(&isp1362_hcd->periodic);
+ INIT_LIST_HEAD(&isp1362_hcd->isoc);
+ INIT_LIST_HEAD(&isp1362_hcd->remove_list);
+ isp1362_hcd->board = dev_get_platdata(&pdev->dev);
+#if USE_PLATFORM_DELAY
+ if (!isp1362_hcd->board->delay) {
+ dev_err(hcd->self.controller, "No platform delay function given\n");
+ retval = -ENODEV;
+ goto err6;
+ }
+#endif
+
+ if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
+ irq_flags |= IRQF_TRIGGER_RISING;
+ if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
+ irq_flags |= IRQF_TRIGGER_FALLING;
+ if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
+ irq_flags |= IRQF_TRIGGER_HIGH;
+ if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
+ irq_flags |= IRQF_TRIGGER_LOW;
+
+ retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
+ if (retval != 0)
+ goto err6;
+ device_wakeup_enable(hcd->self.controller);
+
+ pr_info("%s, irq %d\n", hcd->product_desc, irq);
+
+ create_debug_file(isp1362_hcd);
+
+ return 0;
+
+ err6:
+ DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
+ usb_put_hcd(hcd);
+ err5:
+ DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
+ iounmap(data_reg);
+ err4:
+ DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
+ release_mem_region(data->start, resource_size(data));
+ err3:
+ DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
+ iounmap(addr_reg);
+ err2:
+ DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
+ release_mem_region(addr->start, resource_size(addr));
+ err1:
+ pr_err("%s: init error, %d\n", __func__, retval);
+
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+ int retval = 0;
+
+ DBG(0, "%s: Suspending device\n", __func__);
+
+ if (state.event == PM_EVENT_FREEZE) {
+ DBG(0, "%s: Suspending root hub\n", __func__);
+ retval = isp1362_bus_suspend(hcd);
+ } else {
+ DBG(0, "%s: Suspending RH ports\n", __func__);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ }
+ if (retval == 0)
+ pdev->dev.power.power_state = state;
+ return retval;
+}
+
+static int isp1362_resume(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+
+ DBG(0, "%s: Resuming\n", __func__);
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+ DBG(0, "%s: Resume RH ports\n", __func__);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ return 0;
+ }
+
+ pdev->dev.power.power_state = PMSG_ON;
+
+ return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
+}
+#else
+#define isp1362_suspend NULL
+#define isp1362_resume NULL
+#endif
+
+static struct platform_driver isp1362_driver = {
+ .probe = isp1362_probe,
+ .remove = isp1362_remove,
+
+ .suspend = isp1362_suspend,
+ .resume = isp1362_resume,
+ .driver = {
+ .name = hcd_name,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(isp1362_driver);
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
new file mode 100644
index 00000000000..3b0b4847c3a
--- /dev/null
+++ b/drivers/usb/host/isp1362.h
@@ -0,0 +1,1014 @@
+/*
+ * ISP1362 HCD (Host Controller Driver) for USB.
+ *
+ * COPYRIGHT (C) by L. Wassmann <LW@KARO-electronics.de>
+ */
+
+/* ------------------------------------------------------------------------- */
+/*
+ * Platform specific compile time options
+ */
+#if defined(CONFIG_BLACKFIN)
+
+#include <linux/io.h>
+#define USE_32BIT 0
+#define MAX_ROOT_PORTS 2
+#define USE_PLATFORM_DELAY 0
+#define USE_NDELAY 1
+
+#define DUMMY_DELAY_ACCESS \
+ do { \
+ bfin_read16(ASYNC_BANK0_BASE); \
+ bfin_read16(ASYNC_BANK0_BASE); \
+ bfin_read16(ASYNC_BANK0_BASE); \
+ } while (0)
+
+#undef insw
+#undef outsw
+
+#define insw delayed_insw
+#define outsw delayed_outsw
+
+static inline void delayed_outsw(unsigned int addr, void *buf, int len)
+{
+ unsigned short *bp = (unsigned short *)buf;
+ while (len--) {
+ DUMMY_DELAY_ACCESS;
+ outw(*bp++, addr);
+ }
+}
+
+static inline void delayed_insw(unsigned int addr, void *buf, int len)
+{
+ unsigned short *bp = (unsigned short *)buf;
+ while (len--) {
+ DUMMY_DELAY_ACCESS;
+ *bp++ = inw(addr);
+ }
+}
+
+#else
+
+#define MAX_ROOT_PORTS 2
+
+#define USE_32BIT 0
+
+/* These options are mutually exclusive */
+#define USE_PLATFORM_DELAY 0
+#define USE_NDELAY 0
+
+#define DUMMY_DELAY_ACCESS do {} while (0)
+
+#endif
+
+
+/* ------------------------------------------------------------------------- */
+
+#define USB_RESET_WIDTH 50
+#define MAX_XFER_SIZE 1023
+
+/* Buffer sizes */
+#define ISP1362_BUF_SIZE 4096
+#define ISP1362_ISTL_BUFSIZE 512
+#define ISP1362_INTL_BLKSIZE 64
+#define ISP1362_INTL_BUFFERS 16
+#define ISP1362_ATL_BLKSIZE 64
+
+#define ISP1362_REG_WRITE_OFFSET 0x80
+
+#define REG_WIDTH_16 0x000
+#define REG_WIDTH_32 0x100
+#define REG_WIDTH_MASK 0x100
+#define REG_NO_MASK 0x0ff
+
+#ifdef ISP1362_DEBUG
+typedef const unsigned int isp1362_reg_t;
+
+#define REG_ACCESS_R 0x200
+#define REG_ACCESS_W 0x400
+#define REG_ACCESS_RW 0x600
+#define REG_ACCESS_MASK 0x600
+
+#define ISP1362_REG_NO(r) ((r) & REG_NO_MASK)
+
+#define ISP1362_REG(name, addr, width, rw) \
+static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw))
+
+#define REG_ACCESS_TEST(r) BUG_ON(((r) & ISP1362_REG_WRITE_OFFSET) && !((r) & REG_ACCESS_W))
+#define REG_WIDTH_TEST(r, w) BUG_ON(((r) & REG_WIDTH_MASK) != (w))
+#else
+typedef const unsigned char isp1362_reg_t;
+#define ISP1362_REG_NO(r) (r)
+
+#define ISP1362_REG(name, addr, width, rw) \
+static isp1362_reg_t ISP1362_REG_##name = addr
+
+#define REG_ACCESS_TEST(r) do {} while (0)
+#define REG_WIDTH_TEST(r, w) do {} while (0)
+#endif
+
+/* OHCI compatible registers */
+/*
+ * Note: Some of the ISP1362 'OHCI' registers implement only
+ * a subset of the bits defined in the OHCI spec.
+ *
+ * Bitmasks for the individual bits of these registers are defined in "ohci.h"
+ */
+ISP1362_REG(HCREVISION, 0x00, REG_WIDTH_32, REG_ACCESS_R);
+ISP1362_REG(HCCONTROL, 0x01, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCCMDSTAT, 0x02, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTSTAT, 0x03, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTENB, 0x04, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTDIS, 0x05, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCFMINTVL, 0x0d, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCFMREM, 0x0e, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCFMNUM, 0x0f, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCLSTHRESH, 0x11, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHDESCA, 0x12, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHDESCB, 0x13, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHSTATUS, 0x14, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHPORT1, 0x15, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHPORT2, 0x16, REG_WIDTH_32, REG_ACCESS_RW);
+
+/* Philips ISP1362 specific registers */
+ISP1362_REG(HCHWCFG, 0x20, REG_WIDTH_16, REG_ACCESS_RW);
+#define HCHWCFG_DISABLE_SUSPEND (1 << 15)
+#define HCHWCFG_GLOBAL_PWRDOWN (1 << 14)
+#define HCHWCFG_PULLDOWN_DS2 (1 << 13)
+#define HCHWCFG_PULLDOWN_DS1 (1 << 12)
+#define HCHWCFG_CLKNOTSTOP (1 << 11)
+#define HCHWCFG_ANALOG_OC (1 << 10)
+#define HCHWCFG_ONEINT (1 << 9)
+#define HCHWCFG_DACK_MODE (1 << 8)
+#define HCHWCFG_ONEDMA (1 << 7)
+#define HCHWCFG_DACK_POL (1 << 6)
+#define HCHWCFG_DREQ_POL (1 << 5)
+#define HCHWCFG_DBWIDTH_MASK (0x03 << 3)
+#define HCHWCFG_DBWIDTH(n) (((n) << 3) & HCHWCFG_DBWIDTH_MASK)
+#define HCHWCFG_INT_POL (1 << 2)
+#define HCHWCFG_INT_TRIGGER (1 << 1)
+#define HCHWCFG_INT_ENABLE (1 << 0)
+
+ISP1362_REG(HCDMACFG, 0x21, REG_WIDTH_16, REG_ACCESS_RW);
+#define HCDMACFG_CTR_ENABLE (1 << 7)
+#define HCDMACFG_BURST_LEN_MASK (0x03 << 5)
+#define HCDMACFG_BURST_LEN(n) (((n) << 5) & HCDMACFG_BURST_LEN_MASK)
+#define HCDMACFG_BURST_LEN_1 HCDMACFG_BURST_LEN(0)
+#define HCDMACFG_BURST_LEN_4 HCDMACFG_BURST_LEN(1)
+#define HCDMACFG_BURST_LEN_8 HCDMACFG_BURST_LEN(2)
+#define HCDMACFG_DMA_ENABLE (1 << 4)
+#define HCDMACFG_BUF_TYPE_MASK (0x07 << 1)
+#define HCDMACFG_BUF_TYPE(n) (((n) << 1) & HCDMACFG_BUF_TYPE_MASK)
+#define HCDMACFG_BUF_ISTL0 HCDMACFG_BUF_TYPE(0)
+#define HCDMACFG_BUF_ISTL1 HCDMACFG_BUF_TYPE(1)
+#define HCDMACFG_BUF_INTL HCDMACFG_BUF_TYPE(2)
+#define HCDMACFG_BUF_ATL HCDMACFG_BUF_TYPE(3)
+#define HCDMACFG_BUF_DIRECT HCDMACFG_BUF_TYPE(4)
+#define HCDMACFG_DMA_RW_SELECT (1 << 0)
+
+ISP1362_REG(HCXFERCTR, 0x22, REG_WIDTH_16, REG_ACCESS_RW);
+
+ISP1362_REG(HCuPINT, 0x24, REG_WIDTH_16, REG_ACCESS_RW);
+#define HCuPINT_SOF (1 << 0)
+#define HCuPINT_ISTL0 (1 << 1)
+#define HCuPINT_ISTL1 (1 << 2)
+#define HCuPINT_EOT (1 << 3)
+#define HCuPINT_OPR (1 << 4)
+#define HCuPINT_SUSP (1 << 5)
+#define HCuPINT_CLKRDY (1 << 6)
+#define HCuPINT_INTL (1 << 7)
+#define HCuPINT_ATL (1 << 8)
+#define HCuPINT_OTG (1 << 9)
+
+ISP1362_REG(HCuPINTENB, 0x25, REG_WIDTH_16, REG_ACCESS_RW);
+/* same bit definitions apply as for HCuPINT */
+
+ISP1362_REG(HCCHIPID, 0x27, REG_WIDTH_16, REG_ACCESS_R);
+#define HCCHIPID_MASK 0xff00
+#define HCCHIPID_MAGIC 0x3600
+
+ISP1362_REG(HCSCRATCH, 0x28, REG_WIDTH_16, REG_ACCESS_RW);
+
+ISP1362_REG(HCSWRES, 0x29, REG_WIDTH_16, REG_ACCESS_W);
+#define HCSWRES_MAGIC 0x00f6
+
+ISP1362_REG(HCBUFSTAT, 0x2c, REG_WIDTH_16, REG_ACCESS_RW);
+#define HCBUFSTAT_ISTL0_FULL (1 << 0)
+#define HCBUFSTAT_ISTL1_FULL (1 << 1)
+#define HCBUFSTAT_INTL_ACTIVE (1 << 2)
+#define HCBUFSTAT_ATL_ACTIVE (1 << 3)
+#define HCBUFSTAT_RESET_HWPP (1 << 4)
+#define HCBUFSTAT_ISTL0_ACTIVE (1 << 5)
+#define HCBUFSTAT_ISTL1_ACTIVE (1 << 6)
+#define HCBUFSTAT_ISTL0_DONE (1 << 8)
+#define HCBUFSTAT_ISTL1_DONE (1 << 9)
+#define HCBUFSTAT_PAIRED_PTDPP (1 << 10)
+
+ISP1362_REG(HCDIRADDR, 0x32, REG_WIDTH_32, REG_ACCESS_RW);
+#define HCDIRADDR_ADDR_MASK 0x0000ffff
+#define HCDIRADDR_ADDR(n) (((n) << 0) & HCDIRADDR_ADDR_MASK)
+#define HCDIRADDR_COUNT_MASK 0xffff0000
+#define HCDIRADDR_COUNT(n) (((n) << 16) & HCDIRADDR_COUNT_MASK)
+ISP1362_REG(HCDIRDATA, 0x45, REG_WIDTH_16, REG_ACCESS_RW);
+
+ISP1362_REG(HCISTLBUFSZ, 0x30, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCISTL0PORT, 0x40, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCISTL1PORT, 0x42, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCISTLRATE, 0x47, REG_WIDTH_16, REG_ACCESS_RW);
+
+ISP1362_REG(HCINTLBUFSZ, 0x33, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCINTLPORT, 0x43, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCINTLBLKSZ, 0x53, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCINTLDONE, 0x17, REG_WIDTH_32, REG_ACCESS_R);
+ISP1362_REG(HCINTLSKIP, 0x18, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTLLAST, 0x19, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTLCURR, 0x1a, REG_WIDTH_16, REG_ACCESS_R);
+
+ISP1362_REG(HCATLBUFSZ, 0x34, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCATLPORT, 0x44, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCATLBLKSZ, 0x54, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCATLDONE, 0x1b, REG_WIDTH_32, REG_ACCESS_R);
+ISP1362_REG(HCATLSKIP, 0x1c, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCATLLAST, 0x1d, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCATLCURR, 0x1e, REG_WIDTH_16, REG_ACCESS_R);
+
+ISP1362_REG(HCATLDTC, 0x51, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCATLDTCTO, 0x52, REG_WIDTH_16, REG_ACCESS_RW);
+
+
+ISP1362_REG(OTGCONTROL, 0x62, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(OTGSTATUS, 0x67, REG_WIDTH_16, REG_ACCESS_R);
+ISP1362_REG(OTGINT, 0x68, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(OTGINTENB, 0x69, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(OTGTIMER, 0x6A, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(OTGALTTMR, 0x6C, REG_WIDTH_16, REG_ACCESS_RW);
+
+/* Philips transfer descriptor, cpu-endian */
+struct ptd {
+ u16 count;
+#define PTD_COUNT_MSK (0x3ff << 0)
+#define PTD_TOGGLE_MSK (1 << 10)
+#define PTD_ACTIVE_MSK (1 << 11)
+#define PTD_CC_MSK (0xf << 12)
+ u16 mps;
+#define PTD_MPS_MSK (0x3ff << 0)
+#define PTD_SPD_MSK (1 << 10)
+#define PTD_LAST_MSK (1 << 11)
+#define PTD_EP_MSK (0xf << 12)
+ u16 len;
+#define PTD_LEN_MSK (0x3ff << 0)
+#define PTD_DIR_MSK (3 << 10)
+#define PTD_DIR_SETUP (0)
+#define PTD_DIR_OUT (1)
+#define PTD_DIR_IN (2)
+ u16 faddr;
+#define PTD_FA_MSK (0x7f << 0)
+/* PTD Byte 7: [StartingFrame (if ISO PTD) | StartingFrame[0..4], PollingRate[0..2] (if INT PTD)] */
+#define PTD_SF_ISO_MSK (0xff << 8)
+#define PTD_SF_INT_MSK (0x1f << 8)
+#define PTD_PR_MSK (0x07 << 13)
+} __attribute__ ((packed, aligned(2)));
+#define PTD_HEADER_SIZE sizeof(struct ptd)
+
+/* ------------------------------------------------------------------------- */
+/* Copied from ohci.h: */
+/*
+ * Hardware transfer status codes -- CC from PTD
+ */
+#define PTD_CC_NOERROR 0x00
+#define PTD_CC_CRC 0x01
+#define PTD_CC_BITSTUFFING 0x02
+#define PTD_CC_DATATOGGLEM 0x03
+#define PTD_CC_STALL 0x04
+#define PTD_DEVNOTRESP 0x05
+#define PTD_PIDCHECKFAIL 0x06
+#define PTD_UNEXPECTEDPID 0x07
+#define PTD_DATAOVERRUN 0x08
+#define PTD_DATAUNDERRUN 0x09
+ /* 0x0A, 0x0B reserved for hardware */
+#define PTD_BUFFEROVERRUN 0x0C
+#define PTD_BUFFERUNDERRUN 0x0D
+ /* 0x0E, 0x0F reserved for HCD */
+#define PTD_NOTACCESSED 0x0F
+
+
+/* map OHCI TD status codes (CC) to errno values */
+static const int cc_to_error[16] = {
+ /* No Error */ 0,
+ /* CRC Error */ -EILSEQ,
+ /* Bit Stuff */ -EPROTO,
+ /* Data Togg */ -EILSEQ,
+ /* Stall */ -EPIPE,
+ /* DevNotResp */ -ETIMEDOUT,
+ /* PIDCheck */ -EPROTO,
+ /* UnExpPID */ -EPROTO,
+ /* DataOver */ -EOVERFLOW,
+ /* DataUnder */ -EREMOTEIO,
+ /* (for hw) */ -EIO,
+ /* (for hw) */ -EIO,
+ /* BufferOver */ -ECOMM,
+ /* BuffUnder */ -ENOSR,
+ /* (for HCD) */ -EALREADY,
+ /* (for HCD) */ -EALREADY
+};
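The table is indexed with the 4-bit completion code pulled out of a PTD; a hedged usage sketch (PTD_GET_CC is defined further down in this header, and "ptd" stands for any completed descriptor):

    /* Illustrative only: map a hardware completion code to an errno value. */
    int status = cc_to_error[PTD_GET_CC(&ptd)];
    if (status == -EPIPE)
    	;	/* endpoint stalled - the URB would normally be retired here */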
+
+
+/*
+ * HcControl (control) register masks
+ */
+#define OHCI_CTRL_HCFS (3 << 6) /* host controller functional state */
+#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
+#define OHCI_CTRL_RWE (1 << 10) /* remote wakeup enable */
+
+/* pre-shifted values for HCFS */
+# define OHCI_USB_RESET (0 << 6)
+# define OHCI_USB_RESUME (1 << 6)
+# define OHCI_USB_OPER (2 << 6)
+# define OHCI_USB_SUSPEND (3 << 6)
+
+/*
+ * HcCommandStatus (cmdstatus) register masks
+ */
+#define OHCI_HCR (1 << 0) /* host controller reset */
+#define OHCI_SOC (3 << 16) /* scheduling overrun count */
+
+/*
+ * masks used with interrupt registers:
+ * HcInterruptStatus (intrstatus)
+ * HcInterruptEnable (intrenable)
+ * HcInterruptDisable (intrdisable)
+ */
+#define OHCI_INTR_SO (1 << 0) /* scheduling overrun */
+#define OHCI_INTR_WDH (1 << 1) /* writeback of done_head */
+#define OHCI_INTR_SF (1 << 2) /* start frame */
+#define OHCI_INTR_RD (1 << 3) /* resume detect */
+#define OHCI_INTR_UE (1 << 4) /* unrecoverable error */
+#define OHCI_INTR_FNO (1 << 5) /* frame number overflow */
+#define OHCI_INTR_RHSC (1 << 6) /* root hub status change */
+#define OHCI_INTR_OC (1 << 30) /* ownership change */
+#define OHCI_INTR_MIE (1 << 31) /* master interrupt enable */
+
+/* roothub.portstatus [i] bits */
+#define RH_PS_CCS 0x00000001 /* current connect status */
+#define RH_PS_PES 0x00000002 /* port enable status*/
+#define RH_PS_PSS 0x00000004 /* port suspend status */
+#define RH_PS_POCI 0x00000008 /* port over current indicator */
+#define RH_PS_PRS 0x00000010 /* port reset status */
+#define RH_PS_PPS 0x00000100 /* port power status */
+#define RH_PS_LSDA 0x00000200 /* low speed device attached */
+#define RH_PS_CSC 0x00010000 /* connect status change */
+#define RH_PS_PESC 0x00020000 /* port enable status change */
+#define RH_PS_PSSC 0x00040000 /* port suspend status change */
+#define RH_PS_OCIC 0x00080000 /* over current indicator change */
+#define RH_PS_PRSC 0x00100000 /* port reset status change */
+
+/* roothub.status bits */
+#define RH_HS_LPS 0x00000001 /* local power status */
+#define RH_HS_OCI 0x00000002 /* over current indicator */
+#define RH_HS_DRWE 0x00008000 /* device remote wakeup enable */
+#define RH_HS_LPSC 0x00010000 /* local power status change */
+#define RH_HS_OCIC 0x00020000 /* over current indicator change */
+#define RH_HS_CRWE 0x80000000 /* clear remote wakeup enable */
+
+/* roothub.b masks */
+#define RH_B_DR 0x0000ffff /* device removable flags */
+#define RH_B_PPCM 0xffff0000 /* port power control mask */
+
+/* roothub.a masks */
+#define RH_A_NDP (0xff << 0) /* number of downstream ports */
+#define RH_A_PSM (1 << 8) /* power switching mode */
+#define RH_A_NPS (1 << 9) /* no power switching */
+#define RH_A_DT (1 << 10) /* device type (mbz) */
+#define RH_A_OCPM (1 << 11) /* over current protection mode */
+#define RH_A_NOCP (1 << 12) /* no over current protection */
+#define RH_A_POTPGT (0xff << 24) /* power on to power good time */
+
+#define FI 0x2edf /* 12000 bits per frame (-1) */
+#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7))
+#define LSTHRESH 0x628 /* lowspeed bit threshold */
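Worked numbers for the HCFMINTVL value programmed in isp1362_hc_start() above, purely as a sanity check of the FSMP() formula:

    /* FI        = 0x2edf = 11999 (12000 bit times per frame, minus 1)
     * FSMP(FI)  = 0x7fff & (6 * (11999 - 210) / 7) = 10104 = 0x2778
     * HCFMINTVL = (FSMP(FI) << 16) | FI = 0x27782edf
     */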
+
+/* ------------------------------------------------------------------------- */
+
+/* PTD accessor macros. */
+#define PTD_GET_COUNT(p) (((p)->count & PTD_COUNT_MSK) >> 0)
+#define PTD_COUNT(v) (((v) << 0) & PTD_COUNT_MSK)
+#define PTD_GET_TOGGLE(p) (((p)->count & PTD_TOGGLE_MSK) >> 10)
+#define PTD_TOGGLE(v) (((v) << 10) & PTD_TOGGLE_MSK)
+#define PTD_GET_ACTIVE(p) (((p)->count & PTD_ACTIVE_MSK) >> 11)
+#define PTD_ACTIVE(v) (((v) << 11) & PTD_ACTIVE_MSK)
+#define PTD_GET_CC(p) (((p)->count & PTD_CC_MSK) >> 12)
+#define PTD_CC(v) (((v) << 12) & PTD_CC_MSK)
+#define PTD_GET_MPS(p) (((p)->mps & PTD_MPS_MSK) >> 0)
+#define PTD_MPS(v) (((v) << 0) & PTD_MPS_MSK)
+#define PTD_GET_SPD(p) (((p)->mps & PTD_SPD_MSK) >> 10)
+#define PTD_SPD(v) (((v) << 10) & PTD_SPD_MSK)
+#define PTD_GET_LAST(p) (((p)->mps & PTD_LAST_MSK) >> 11)
+#define PTD_LAST(v) (((v) << 11) & PTD_LAST_MSK)
+#define PTD_GET_EP(p) (((p)->mps & PTD_EP_MSK) >> 12)
+#define PTD_EP(v) (((v) << 12) & PTD_EP_MSK)
+#define PTD_GET_LEN(p) (((p)->len & PTD_LEN_MSK) >> 0)
+#define PTD_LEN(v) (((v) << 0) & PTD_LEN_MSK)
+#define PTD_GET_DIR(p) (((p)->len & PTD_DIR_MSK) >> 10)
+#define PTD_DIR(v) (((v) << 10) & PTD_DIR_MSK)
+#define PTD_GET_FA(p) (((p)->faddr & PTD_FA_MSK) >> 0)
+#define PTD_FA(v) (((v) << 0) & PTD_FA_MSK)
+#define PTD_GET_SF_INT(p) (((p)->faddr & PTD_SF_INT_MSK) >> 8)
+#define PTD_SF_INT(v) (((v) << 8) & PTD_SF_INT_MSK)
+#define PTD_GET_SF_ISO(p) (((p)->faddr & PTD_SF_ISO_MSK) >> 8)
+#define PTD_SF_ISO(v) (((v) << 8) & PTD_SF_ISO_MSK)
+#define PTD_GET_PR(p) (((p)->faddr & PTD_PR_MSK) >> 13)
+#define PTD_PR(v) (((v) << 13) & PTD_PR_MSK)
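A small illustration (not taken from the driver) of how the field macros above compose and decompose a PTD word; the values are arbitrary:

    struct ptd p = { 0 };

    p.count = PTD_COUNT(64) | PTD_TOGGLE(1) | PTD_ACTIVE(1) | PTD_CC(PTD_NOTACCESSED);
    p.mps   = PTD_MPS(64) | PTD_EP(1) | PTD_LAST(1);
    /* PTD_GET_COUNT(&p) == 64, PTD_GET_ACTIVE(&p) == 1, PTD_GET_EP(&p) == 1 */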
+
+#define LOG2_PERIODIC_SIZE 5 /* arbitrary; this matches OHCI */
+#define PERIODIC_SIZE (1 << LOG2_PERIODIC_SIZE)
+
+struct isp1362_ep {
+ struct usb_host_endpoint *hep;
+ struct usb_device *udev;
+
+ /* philips transfer descriptor */
+ struct ptd ptd;
+
+ u8 maxpacket;
+ u8 epnum;
+ u8 nextpid;
+ u16 error_count;
+ u16 length; /* of current packet */
+ s16 ptd_offset; /* buffer offset in ISP1362 where
+ PTD has been stored
+ (for access thru HCDIRDATA) */
+ int ptd_index;
+ int num_ptds;
+ void *data; /* to databuf */
+ /* queue of active EPs (the ones transmitted to the chip) */
+ struct list_head active;
+
+ /* periodic schedule */
+ u8 branch;
+ u16 interval;
+ u16 load;
+ u16 last_iso;
+
+ /* async schedule */
+ struct list_head schedule; /* list of all EPs that need processing */
+ struct list_head remove_list;
+ int num_req;
+};
+
+struct isp1362_ep_queue {
+ struct list_head active; /* list of PTDs currently processed by HC */
+ atomic_t finishing;
+ unsigned long buf_map;
+ unsigned long skip_map;
+ int free_ptd;
+ u16 buf_start;
+ u16 buf_size;
+ u16 blk_size; /* PTD buffer block size for ATL and INTL */
+ u8 buf_count;
+ u8 buf_avail;
+ char name[16];
+
+ /* for statistical tracking */
+ u8 stat_maxptds; /* Max # of ptds seen simultaneously in fifo */
+ u8 ptd_count; /* number of ptds submitted to this queue */
+};
+
+struct isp1362_hcd {
+ spinlock_t lock;
+ void __iomem *addr_reg;
+ void __iomem *data_reg;
+
+ struct isp1362_platform_data *board;
+
+ struct dentry *debug_file;
+ unsigned long stat1, stat2, stat4, stat8, stat16;
+
+ /* HC registers */
+ u32 intenb; /* "OHCI" interrupts */
+ u16 irqenb; /* uP interrupts */
+
+ /* Root hub registers */
+ u32 rhdesca;
+ u32 rhdescb;
+ u32 rhstatus;
+ u32 rhport[MAX_ROOT_PORTS];
+ unsigned long next_statechange;
+
+ /* HC control reg shadow copy */
+ u32 hc_control;
+
+ /* async schedule: control, bulk */
+ struct list_head async;
+
+ /* periodic schedule: int */
+ u16 load[PERIODIC_SIZE];
+ struct list_head periodic;
+ u16 fmindex;
+
+ /* periodic schedule: isochronous */
+ struct list_head isoc;
+ unsigned int istl_flip:1;
+ unsigned int irq_active:1;
+
+ /* Schedules for the current frame */
+ struct isp1362_ep_queue atl_queue;
+ struct isp1362_ep_queue intl_queue;
+ struct isp1362_ep_queue istl_queue[2];
+
+ /* list of PTDs retrieved from HC */
+ struct list_head remove_list;
+ enum {
+ ISP1362_INT_SOF,
+ ISP1362_INT_ISTL0,
+ ISP1362_INT_ISTL1,
+ ISP1362_INT_EOT,
+ ISP1362_INT_OPR,
+ ISP1362_INT_SUSP,
+ ISP1362_INT_CLKRDY,
+ ISP1362_INT_INTL,
+ ISP1362_INT_ATL,
+ ISP1362_INT_OTG,
+ NUM_ISP1362_IRQS
+ } IRQ_NAMES;
+ unsigned int irq_stat[NUM_ISP1362_IRQS];
+ int req_serial;
+};
+
+static inline const char *ISP1362_INT_NAME(int n)
+{
+ switch (n) {
+ case ISP1362_INT_SOF: return "SOF";
+ case ISP1362_INT_ISTL0: return "ISTL0";
+ case ISP1362_INT_ISTL1: return "ISTL1";
+ case ISP1362_INT_EOT: return "EOT";
+ case ISP1362_INT_OPR: return "OPR";
+ case ISP1362_INT_SUSP: return "SUSP";
+ case ISP1362_INT_CLKRDY: return "CLKRDY";
+ case ISP1362_INT_INTL: return "INTL";
+ case ISP1362_INT_ATL: return "ATL";
+ case ISP1362_INT_OTG: return "OTG";
+ default: return "unknown";
+ }
+}
+
+static inline void ALIGNSTAT(struct isp1362_hcd *isp1362_hcd, void *ptr)
+{
+ unsigned long p = (unsigned long)ptr;
+ if (!(p & 0xf))
+ isp1362_hcd->stat16++;
+ else if (!(p & 0x7))
+ isp1362_hcd->stat8++;
+ else if (!(p & 0x3))
+ isp1362_hcd->stat4++;
+ else if (!(p & 0x1))
+ isp1362_hcd->stat2++;
+ else
+ isp1362_hcd->stat1++;
+}
+
+static inline struct isp1362_hcd *hcd_to_isp1362_hcd(struct usb_hcd *hcd)
+{
+ return (struct isp1362_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd)
+{
+ return container_of((void *)isp1362_hcd, struct usb_hcd, hcd_priv);
+}
+
+#define frame_before(f1, f2)	((s16)((u16)(f1) - (u16)(f2)) < 0)
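The cast to s16 makes the comparison safe across the 16-bit frame-number wrap; two illustrative cases:

    /* frame_before(0xfffe, 0x0001): (s16)(0xfffe - 0x0001) = (s16)0xfffd = -3 < 0 -> true
     * frame_before(0x0001, 0xfffe): (s16)(0x0001 - 0xfffe) = (s16)0x0003 =  3 > 0 -> false
     */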
+
+/*
+ * ISP1362 HW Interface
+ */
+
+#define DBG(level, fmt...) \
+ do { \
+ if (dbg_level > level) \
+ pr_debug(fmt); \
+ } while (0)
+
+#ifdef VERBOSE
+# define VDBG(fmt...) DBG(3, fmt)
+#else
+# define VDBG(fmt...) do {} while (0)
+#endif
+
+#ifdef REGISTERS
+# define RDBG(fmt...) DBG(1, fmt)
+#else
+# define RDBG(fmt...) do {} while (0)
+#endif
+
+#ifdef URB_TRACE
+#define URB_DBG(fmt...) DBG(0, fmt)
+#else
+#define URB_DBG(fmt...) do {} while (0)
+#endif
+
+
+#if USE_PLATFORM_DELAY
+#if USE_NDELAY
+#error USE_PLATFORM_DELAY and USE_NDELAY defined simultaneously.
+#endif
+#define isp1362_delay(h, d) (h)->board->delay(isp1362_hcd_to_hcd(h)->self.controller, d)
+#elif USE_NDELAY
+#define isp1362_delay(h, d) ndelay(d)
+#else
+#define isp1362_delay(h, d) do {} while (0)
+#endif
+
+#define get_urb(ep) ({ \
+ BUG_ON(list_empty(&ep->hep->urb_list)); \
+ container_of(ep->hep->urb_list.next, struct urb, urb_list); \
+})
+
+/* basic access functions for ISP1362 chip registers */
+/* NOTE: The contents of the address pointer register cannot be read back! The driver must
+ * ensure that all register accesses are performed with interrupts disabled, since the interrupt
+ * handler has no way of restoring the previous state.
+ */
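A sketch of the calling convention this note implies, mirroring what the callers earlier in this patch do (assumes an isp1362_hcd pointer in scope):

    unsigned long flags;
    u16 chipid;

    spin_lock_irqsave(&isp1362_hcd->lock, flags);	/* IRQs off around the addr + data cycle */
    chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
    spin_unlock_irqrestore(&isp1362_hcd->lock, flags);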
+static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t reg)
+{
+ REG_ACCESS_TEST(reg);
+ DUMMY_DELAY_ACCESS;
+ writew(ISP1362_REG_NO(reg), isp1362_hcd->addr_reg);
+ DUMMY_DELAY_ACCESS;
+ isp1362_delay(isp1362_hcd, 1);
+}
+
+static void isp1362_write_data16(struct isp1362_hcd *isp1362_hcd, u16 val)
+{
+ DUMMY_DELAY_ACCESS;
+ writew(val, isp1362_hcd->data_reg);
+}
+
+static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd)
+{
+ u16 val;
+
+ DUMMY_DELAY_ACCESS;
+ val = readw(isp1362_hcd->data_reg);
+
+ return val;
+}
+
+static void isp1362_write_data32(struct isp1362_hcd *isp1362_hcd, u32 val)
+{
+#if USE_32BIT
+ DUMMY_DELAY_ACCESS;
+ writel(val, isp1362_hcd->data_reg);
+#else
+ DUMMY_DELAY_ACCESS;
+ writew((u16)val, isp1362_hcd->data_reg);
+ DUMMY_DELAY_ACCESS;
+ writew(val >> 16, isp1362_hcd->data_reg);
+#endif
+}
+
+static u32 isp1362_read_data32(struct isp1362_hcd *isp1362_hcd)
+{
+ u32 val;
+
+#if USE_32BIT
+ DUMMY_DELAY_ACCESS;
+ val = readl(isp1362_hcd->data_reg);
+#else
+ DUMMY_DELAY_ACCESS;
+ val = (u32)readw(isp1362_hcd->data_reg);
+ DUMMY_DELAY_ACCESS;
+ val |= (u32)readw(isp1362_hcd->data_reg) << 16;
+#endif
+ return val;
+}
+
+/* use readsw/writesw to access the fifo whenever possible */
+/* assume HCDIRDATA or XFERCTR & addr_reg have been set up */
+static void isp1362_read_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len)
+{
+ u8 *dp = buf;
+ u16 data;
+
+ if (!len)
+ return;
+
+ RDBG("%s: Reading %d byte from fifo to mem @ %p\n", __func__, len, buf);
+#if USE_32BIT
+ if (len >= 4) {
+ RDBG("%s: Using readsl for %d dwords\n", __func__, len >> 2);
+ readsl(isp1362_hcd->data_reg, dp, len >> 2);
+ dp += len & ~3;
+ len &= 3;
+ }
+#endif
+ if (len >= 2) {
+ RDBG("%s: Using readsw for %d words\n", __func__, len >> 1);
+ insw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1);
+ dp += len & ~1;
+ len &= 1;
+ }
+
+ BUG_ON(len & ~1);
+ if (len > 0) {
+ data = isp1362_read_data16(isp1362_hcd);
+ RDBG("%s: Reading trailing byte %02x to mem @ %08x\n", __func__,
+ (u8)data, (u32)dp);
+ *dp = (u8)data;
+ }
+}
+
+static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len)
+{
+ u8 *dp = buf;
+ u16 data;
+
+ if (!len)
+ return;
+
+ if ((unsigned long)dp & 0x1) {
+ /* not aligned */
+ for (; len > 1; len -= 2) {
+ data = *dp++;
+ data |= *dp++ << 8;
+ isp1362_write_data16(isp1362_hcd, data);
+ }
+ if (len)
+ isp1362_write_data16(isp1362_hcd, *dp);
+ return;
+ }
+
+ RDBG("%s: Writing %d byte to fifo from memory @%p\n", __func__, len, buf);
+#if USE_32BIT
+ if (len >= 4) {
+ RDBG("%s: Using writesl for %d dwords\n", __func__, len >> 2);
+ writesl(isp1362_hcd->data_reg, dp, len >> 2);
+ dp += len & ~3;
+ len &= 3;
+ }
+#endif
+ if (len >= 2) {
+ RDBG("%s: Using writesw for %d words\n", __func__, len >> 1);
+ outsw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1);
+ dp += len & ~1;
+ len &= 1;
+ }
+
+ BUG_ON(len & ~1);
+ if (len > 0) {
+ /* finally write any trailing byte; we don't need to care
+ * about the high byte of the last word written
+ */
+ data = (u16)*dp;
+ RDBG("%s: Sending trailing byte %02x from mem @ %08x\n", __func__,
+ data, (u32)dp);
+ isp1362_write_data16(isp1362_hcd, data);
+ }
+}
+
+#define isp1362_read_reg16(d, r) ({ \
+ u16 __v; \
+ REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \
+ isp1362_write_addr(d, ISP1362_REG_##r); \
+ __v = isp1362_read_data16(d); \
+ RDBG("%s: Read %04x from %s[%02x]\n", __func__, __v, #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r)); \
+ __v; \
+})
+
+#define isp1362_read_reg32(d, r) ({ \
+ u32 __v; \
+ REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \
+ isp1362_write_addr(d, ISP1362_REG_##r); \
+ __v = isp1362_read_data32(d); \
+ RDBG("%s: Read %08x from %s[%02x]\n", __func__, __v, #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r)); \
+ __v; \
+})
+
+#define isp1362_write_reg16(d, r, v) { \
+ REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \
+ isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \
+ isp1362_write_data16(d, (u16)(v)); \
+ RDBG("%s: Wrote %04x to %s[%02x]\n", __func__, (u16)(v), #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r)); \
+}
+
+#define isp1362_write_reg32(d, r, v) { \
+ REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \
+ isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \
+ isp1362_write_data32(d, (u32)(v)); \
+ RDBG("%s: Wrote %08x to %s[%02x]\n", __func__, (u32)(v), #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r)); \
+}
+
+#define isp1362_set_mask16(d, r, m) { \
+ u16 __v; \
+ __v = isp1362_read_reg16(d, r); \
+ if ((__v | m) != __v) \
+ isp1362_write_reg16(d, r, __v | m); \
+}
+
+#define isp1362_clr_mask16(d, r, m) { \
+ u16 __v; \
+ __v = isp1362_read_reg16(d, r); \
+ if ((__v & ~m) != __v) \
+ isp1362_write_reg16(d, r, __v & ~m); \
+}
+
+#define isp1362_set_mask32(d, r, m) { \
+ u32 __v; \
+ __v = isp1362_read_reg32(d, r); \
+ if ((__v | m) != __v) \
+ isp1362_write_reg32(d, r, __v | m); \
+}
+
+#define isp1362_clr_mask32(d, r, m) { \
+ u32 __v; \
+ __v = isp1362_read_reg32(d, r); \
+ if ((__v & ~m) != __v) \
+ isp1362_write_reg32(d, r, __v & ~m); \
+}
+
+#define isp1362_show_reg(d, r) { \
+ if ((ISP1362_REG_##r & REG_WIDTH_MASK) == REG_WIDTH_32) \
+ DBG(0, "%-12s[%02x]: %08x\n", #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg32(d, r)); \
+ else \
+ DBG(0, "%-12s[%02x]: %04x\n", #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg16(d, r)); \
+}
+
+static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *isp1362_hcd)
+{
+ isp1362_show_reg(isp1362_hcd, HCREVISION);
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+ isp1362_show_reg(isp1362_hcd, HCCMDSTAT);
+ isp1362_show_reg(isp1362_hcd, HCINTSTAT);
+ isp1362_show_reg(isp1362_hcd, HCINTENB);
+ isp1362_show_reg(isp1362_hcd, HCFMINTVL);
+ isp1362_show_reg(isp1362_hcd, HCFMREM);
+ isp1362_show_reg(isp1362_hcd, HCFMNUM);
+ isp1362_show_reg(isp1362_hcd, HCLSTHRESH);
+ isp1362_show_reg(isp1362_hcd, HCRHDESCA);
+ isp1362_show_reg(isp1362_hcd, HCRHDESCB);
+ isp1362_show_reg(isp1362_hcd, HCRHSTATUS);
+ isp1362_show_reg(isp1362_hcd, HCRHPORT1);
+ isp1362_show_reg(isp1362_hcd, HCRHPORT2);
+
+ isp1362_show_reg(isp1362_hcd, HCHWCFG);
+ isp1362_show_reg(isp1362_hcd, HCDMACFG);
+ isp1362_show_reg(isp1362_hcd, HCXFERCTR);
+ isp1362_show_reg(isp1362_hcd, HCuPINT);
+
+ if (in_interrupt())
+ DBG(0, "%-12s[%02x]: %04x\n", "HCuPINTENB",
+ ISP1362_REG_NO(ISP1362_REG_HCuPINTENB), isp1362_hcd->irqenb);
+ else
+ isp1362_show_reg(isp1362_hcd, HCuPINTENB);
+ isp1362_show_reg(isp1362_hcd, HCCHIPID);
+ isp1362_show_reg(isp1362_hcd, HCSCRATCH);
+ isp1362_show_reg(isp1362_hcd, HCBUFSTAT);
+ isp1362_show_reg(isp1362_hcd, HCDIRADDR);
+ /* Access would advance fifo
+ * isp1362_show_reg(isp1362_hcd, HCDIRDATA);
+ */
+ isp1362_show_reg(isp1362_hcd, HCISTLBUFSZ);
+ isp1362_show_reg(isp1362_hcd, HCISTLRATE);
+ isp1362_show_reg(isp1362_hcd, HCINTLBUFSZ);
+ isp1362_show_reg(isp1362_hcd, HCINTLBLKSZ);
+ isp1362_show_reg(isp1362_hcd, HCINTLDONE);
+ isp1362_show_reg(isp1362_hcd, HCINTLSKIP);
+ isp1362_show_reg(isp1362_hcd, HCINTLLAST);
+ isp1362_show_reg(isp1362_hcd, HCINTLCURR);
+ isp1362_show_reg(isp1362_hcd, HCATLBUFSZ);
+ isp1362_show_reg(isp1362_hcd, HCATLBLKSZ);
+ /* only valid after ATL_DONE interrupt
+ * isp1362_show_reg(isp1362_hcd, HCATLDONE);
+ */
+ isp1362_show_reg(isp1362_hcd, HCATLSKIP);
+ isp1362_show_reg(isp1362_hcd, HCATLLAST);
+ isp1362_show_reg(isp1362_hcd, HCATLCURR);
+ isp1362_show_reg(isp1362_hcd, HCATLDTC);
+ isp1362_show_reg(isp1362_hcd, HCATLDTCTO);
+}
+
+static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u16 len)
+{
+ len = (len + 1) & ~1;
+
+ isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE);
+ isp1362_write_reg32(isp1362_hcd, HCDIRADDR,
+ HCDIRADDR_ADDR(offset) | HCDIRADDR_COUNT(len));
+}
+
+static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
+{
+ isp1362_write_diraddr(isp1362_hcd, offset, len);
+
+ DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %p\n",
+ __func__, len, offset, buf);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+
+ isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA);
+
+ isp1362_read_fifo(isp1362_hcd, buf, len);
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+}
+
+static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
+{
+ isp1362_write_diraddr(isp1362_hcd, offset, len);
+
+ DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %p\n",
+ __func__, len, offset, buf);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+
+ isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA | ISP1362_REG_WRITE_OFFSET);
+ isp1362_write_fifo(isp1362_hcd, buf, len);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+}
+
+static void __attribute__((unused)) dump_data(char *buf, int len)
+{
+ if (dbg_level > 0) {
+ int k;
+ int lf = 0;
+
+ for (k = 0; k < len; ++k) {
+ if (!lf)
+ DBG(0, "%04x:", k);
+ printk(" %02x", ((u8 *) buf)[k]);
+ lf = 1;
+ if (!k)
+ continue;
+ if (k % 16 == 15) {
+ printk("\n");
+ lf = 0;
+ continue;
+ }
+ if (k % 8 == 7)
+ printk(" ");
+ if (k % 4 == 3)
+ printk(" ");
+ }
+ if (lf)
+ printk("\n");
+ }
+}
+
+#if defined(PTD_TRACE)
+
+static void dump_ptd(struct ptd *ptd)
+{
+ DBG(0, "EP %p: CC=%x EP=%d DIR=%x CNT=%d LEN=%d MPS=%d TGL=%x ACT=%x FA=%d SPD=%x SF=%x PR=%x LST=%x\n",
+ container_of(ptd, struct isp1362_ep, ptd),
+ PTD_GET_CC(ptd), PTD_GET_EP(ptd), PTD_GET_DIR(ptd),
+ PTD_GET_COUNT(ptd), PTD_GET_LEN(ptd), PTD_GET_MPS(ptd),
+ PTD_GET_TOGGLE(ptd), PTD_GET_ACTIVE(ptd), PTD_GET_FA(ptd),
+ PTD_GET_SPD(ptd), PTD_GET_SF_INT(ptd), PTD_GET_PR(ptd), PTD_GET_LAST(ptd));
+ DBG(0, " %04x %04x %04x %04x\n", ptd->count, ptd->mps, ptd->len, ptd->faddr);
+}
+
+static void dump_ptd_out_data(struct ptd *ptd, u8 *buf)
+{
+ if (dbg_level > 0) {
+ if (PTD_GET_DIR(ptd) != PTD_DIR_IN && PTD_GET_LEN(ptd)) {
+ DBG(0, "--out->\n");
+ dump_data(buf, PTD_GET_LEN(ptd));
+ }
+ }
+}
+
+static void dump_ptd_in_data(struct ptd *ptd, u8 *buf)
+{
+ if (dbg_level > 0) {
+ if (PTD_GET_DIR(ptd) == PTD_DIR_IN && PTD_GET_COUNT(ptd)) {
+ DBG(0, "<--in--\n");
+ dump_data(buf, PTD_GET_COUNT(ptd));
+ }
+ DBG(0, "-----\n");
+ }
+}
+
+static void dump_ptd_queue(struct isp1362_ep_queue *epq)
+{
+ struct isp1362_ep *ep;
+ int dbg = dbg_level;
+
+ dbg_level = 1;
+ list_for_each_entry(ep, &epq->active, active) {
+ dump_ptd(&ep->ptd);
+ dump_data(ep->data, ep->length);
+ }
+ dbg_level = dbg;
+}
+#else
+#define dump_ptd(ptd) do {} while (0)
+#define dump_ptd_in_data(ptd, buf) do {} while (0)
+#define dump_ptd_out_data(ptd, buf) do {} while (0)
+#define dump_ptd_data(ptd, buf) do {} while (0)
+#define dump_ptd_queue(epq) do {} while (0)
+#endif
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index b899f1a59c2..51a0ae9cdd1 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -8,29 +8,46 @@
*
* (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
*
+ * (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
+ *
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
#include <asm/unaligned.h>
+#include <asm/cacheflush.h>
+#include <linux/gpio.h>
-#include "../core/hcd.h"
#include "isp1760-hcd.h"
static struct kmem_cache *qtd_cachep;
static struct kmem_cache *qh_cachep;
+static struct kmem_cache *urb_listitem_cachep;
+
+enum queue_head_types {
+ QH_CONTROL,
+ QH_BULK,
+ QH_INTERRUPT,
+ QH_END
+};
struct isp1760_hcd {
u32 hcs_params;
spinlock_t lock;
- struct inter_packet_info atl_ints[32];
- struct inter_packet_info int_ints[32];
+ struct slotinfo atl_slots[32];
+ int atl_done_map;
+ struct slotinfo int_slots[32];
+ int int_done_map;
struct memory_chunk memory_pool[BLOCKS];
+ struct list_head qh_list[QH_END];
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024
@@ -39,16 +56,14 @@ struct isp1760_hcd {
unsigned long reset_done;
unsigned long next_statechange;
unsigned int devflags;
+
+ int rst_gpio;
};
static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
{
return (struct isp1760_hcd *) (hcd->hcd_priv);
}
-static inline struct usb_hcd *priv_to_hcd(struct isp1760_hcd *priv)
-{
- return container_of((void *) priv, struct usb_hcd, hcd_priv);
-}
/* Section 2.2 Host Controller Capability Registers */
#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
@@ -78,217 +93,271 @@ static inline struct usb_hcd *priv_to_hcd(struct isp1760_hcd *priv)
#define PORT_RWC_BITS (PORT_CSC)
struct isp1760_qtd {
- struct isp1760_qtd *hw_next;
u8 packet_type;
- u8 toggle;
-
void *data_buffer;
+ u32 payload_addr;
+
/* the rest is HCD-private */
struct list_head qtd_list;
struct urb *urb;
size_t length;
-
- /* isp special*/
+ size_t actual_length;
+
+ /* QTD_ENQUEUED: waiting for transfer (inactive) */
+ /* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */
+ /* QTD_XFER_STARTED: valid ptd has been written to isp176x - only
+ interrupt handler may touch this qtd! */
+ /* QTD_XFER_COMPLETE: payload has been transferred successfully */
+ /* QTD_RETIRE: transfer error/abort qtd */
+#define QTD_ENQUEUED 0
+#define QTD_PAYLOAD_ALLOC 1
+#define QTD_XFER_STARTED 2
+#define QTD_XFER_COMPLETE 3
+#define QTD_RETIRE 4
u32 status;
-#define URB_COMPLETE_NOTIFY (1 << 0)
-#define URB_ENQUEUED (1 << 1)
-#define URB_TYPE_ATL (1 << 2)
-#define URB_TYPE_INT (1 << 3)
};
+/* Queue head, one for each active endpoint */
struct isp1760_qh {
- /* first part defined by EHCI spec */
+ struct list_head qh_list;
struct list_head qtd_list;
- struct isp1760_hcd *priv;
-
- /* periodic schedule info */
- unsigned short period; /* polling interval */
- struct usb_device *dev;
-
u32 toggle;
u32 ping;
+ int slot;
+ int tt_buffer_dirty; /* See USB2.0 spec section 11.17.5 */
};
-#define ehci_port_speed(priv, portsc) (1 << USB_PORT_FEAT_HIGHSPEED)
+struct urb_listitem {
+ struct list_head urb_list;
+ struct urb *urb;
+};
-static unsigned int isp1760_readl(__u32 __iomem *regs)
+/*
+ * Access functions for isp176x registers (addresses 0..0x03FF).
+ */
+static u32 reg_read32(void __iomem *base, u32 reg)
{
- return readl(regs);
+ return readl(base + reg);
}
-static void isp1760_writel(const unsigned int val, __u32 __iomem *regs)
+static void reg_write32(void __iomem *base, u32 reg, u32 val)
{
- writel(val, regs);
+ writel(val, base + reg);
}
/*
- * The next two copy via MMIO data to/from the device. memcpy_{to|from}io()
+ * Access functions for isp176x memory (offset >= 0x0400).
+ *
+ * bank_reads8() reads memory locations prefetched by an earlier write to
+ * HC_MEMORY_REG (see isp176x datasheet). Unless you want to do fancy multi-
+ * bank optimizations, you should use the more generic mem_reads8() below.
+ *
+ * For access to ptd memory, use the specialized ptd_read() and ptd_write()
+ * below.
+ *
+ * These functions copy via MMIO data to/from the device. memcpy_{to|from}io()
* doesn't quite work because some people have to enforce 32-bit access
*/
-static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
- __u32 __iomem *dst, u32 len)
+static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
+ __u32 *dst, u32 bytes)
{
+ __u32 __iomem *src;
u32 val;
- u8 *buff8;
+ __u8 *src_byteptr;
+ __u8 *dst_byteptr;
- if (!src) {
- printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
- return;
- }
+ src = src_base + (bank_addr | src_offset);
- while (len >= 4) {
- *src = __raw_readl(dst);
- len -= 4;
- src++;
- dst++;
+ if (src_offset < PAYLOAD_OFFSET) {
+ while (bytes >= 4) {
+ *dst = le32_to_cpu(__raw_readl(src));
+ bytes -= 4;
+ src++;
+ dst++;
+ }
+ } else {
+ while (bytes >= 4) {
+ *dst = __raw_readl(src);
+ bytes -= 4;
+ src++;
+ dst++;
+ }
}
- if (!len)
+ if (!bytes)
return;
/* in case we have 3, 2 or 1 by left. The dst buffer may not be fully
* allocated.
*/
- val = isp1760_readl(dst);
-
- buff8 = (u8 *)src;
- while (len) {
-
- *buff8 = val;
- val >>= 8;
- len--;
- buff8++;
+ if (src_offset < PAYLOAD_OFFSET)
+ val = le32_to_cpu(__raw_readl(src));
+ else
+ val = __raw_readl(src);
+
+ dst_byteptr = (void *) dst;
+ src_byteptr = (void *) &val;
+ while (bytes > 0) {
+ *dst_byteptr = *src_byteptr;
+ dst_byteptr++;
+ src_byteptr++;
+ bytes--;
}
}
-static void priv_write_copy(const struct isp1760_hcd *priv, const u32 *src,
- __u32 __iomem *dst, u32 len)
+static void mem_reads8(void __iomem *src_base, u32 src_offset, void *dst,
+ u32 bytes)
{
- while (len >= 4) {
- __raw_writel(*src, dst);
- len -= 4;
- src++;
- dst++;
+ reg_write32(src_base, HC_MEMORY_REG, src_offset + ISP_BANK(0));
+ ndelay(90);
+ bank_reads8(src_base, src_offset, ISP_BANK(0), dst, bytes);
+}
+
+static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
+ __u32 const *src, u32 bytes)
+{
+ __u32 __iomem *dst;
+
+ dst = dst_base + dst_offset;
+
+ if (dst_offset < PAYLOAD_OFFSET) {
+ while (bytes >= 4) {
+ __raw_writel(cpu_to_le32(*src), dst);
+ bytes -= 4;
+ src++;
+ dst++;
+ }
+ } else {
+ while (bytes >= 4) {
+ __raw_writel(*src, dst);
+ bytes -= 4;
+ src++;
+ dst++;
+ }
}
- if (!len)
+ if (!bytes)
return;
- /* in case we have 3, 2 or 1 by left. The buffer is allocated and the
- * extra bytes should not be read by the HW
+ /* in case we have 3, 2 or 1 bytes left. The buffer is allocated and the
+ * extra bytes should not be read by the HW.
*/
- __raw_writel(*src, dst);
+ if (dst_offset < PAYLOAD_OFFSET)
+ __raw_writel(cpu_to_le32(*src), dst);
+ else
+ __raw_writel(*src, dst);
}
+/*
+ * Read and write ptds. 'ptd_offset' should be one of ISO_PTD_OFFSET,
+ * INT_PTD_OFFSET, and ATL_PTD_OFFSET. 'slot' should be less than 32.
+ */
+static void ptd_read(void __iomem *base, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
+{
+ reg_write32(base, HC_MEMORY_REG,
+ ISP_BANK(0) + ptd_offset + slot*sizeof(*ptd));
+ ndelay(90);
+ bank_reads8(base, ptd_offset + slot*sizeof(*ptd), ISP_BANK(0),
+ (void *) ptd, sizeof(*ptd));
+}
+
+static void ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
+ struct ptd *ptd)
+{
+ mem_writes8(base, ptd_offset + slot*sizeof(*ptd) + sizeof(ptd->dw0),
+ &ptd->dw1, 7*sizeof(ptd->dw1));
+ /* Make sure dw0 gets written last (after other dw's and after payload)
+ since it contains the enable bit */
+ wmb();
+ mem_writes8(base, ptd_offset + slot*sizeof(*ptd), &ptd->dw0,
+ sizeof(ptd->dw0));
+}
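A hedged usage sketch of the two helpers, with an arbitrary slot number; ATL_PTD_OFFSET is assumed to come from isp1760-hcd.h, as the comment above the helpers says:

    struct ptd ptd;

    ptd_read(hcd->regs, ATL_PTD_OFFSET, 5, &ptd);	/* fetch ATL slot 5 */
    /* ... inspect or modify dw1..dw7 ... */
    ptd_write(hcd->regs, ATL_PTD_OFFSET, 5, &ptd);	/* dw0 (enable bit) is written last */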
+
+
/* memory management of the 60kb on the chip from 0x1000 to 0xffff */
static void init_memory(struct isp1760_hcd *priv)
{
- int i;
- u32 payload;
+ int i, curr;
+ u32 payload_addr;
- payload = 0x1000;
+ payload_addr = PAYLOAD_OFFSET;
for (i = 0; i < BLOCK_1_NUM; i++) {
- priv->memory_pool[i].start = payload;
+ priv->memory_pool[i].start = payload_addr;
priv->memory_pool[i].size = BLOCK_1_SIZE;
priv->memory_pool[i].free = 1;
- payload += priv->memory_pool[i].size;
+ payload_addr += priv->memory_pool[i].size;
}
-
- for (i = BLOCK_1_NUM; i < BLOCK_1_NUM + BLOCK_2_NUM; i++) {
- priv->memory_pool[i].start = payload;
- priv->memory_pool[i].size = BLOCK_2_SIZE;
- priv->memory_pool[i].free = 1;
- payload += priv->memory_pool[i].size;
+ curr = i;
+ for (i = 0; i < BLOCK_2_NUM; i++) {
+ priv->memory_pool[curr + i].start = payload_addr;
+ priv->memory_pool[curr + i].size = BLOCK_2_SIZE;
+ priv->memory_pool[curr + i].free = 1;
+ payload_addr += priv->memory_pool[curr + i].size;
}
-
- for (i = BLOCK_1_NUM + BLOCK_2_NUM; i < BLOCKS; i++) {
- priv->memory_pool[i].start = payload;
- priv->memory_pool[i].size = BLOCK_3_SIZE;
- priv->memory_pool[i].free = 1;
- payload += priv->memory_pool[i].size;
+ curr = i;
+ for (i = 0; i < BLOCK_3_NUM; i++) {
+ priv->memory_pool[curr + i].start = payload_addr;
+ priv->memory_pool[curr + i].size = BLOCK_3_SIZE;
+ priv->memory_pool[curr + i].free = 1;
+ payload_addr += priv->memory_pool[curr + i].size;
}
- BUG_ON(payload - priv->memory_pool[i - 1].size > PAYLOAD_SIZE);
+ WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE);
}
-static u32 alloc_mem(struct isp1760_hcd *priv, u32 size)
+static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
int i;
- if (!size)
- return ISP1760_NULL_POINTER;
+ WARN_ON(qtd->payload_addr);
+
+ if (!qtd->length)
+ return;
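+ /* First-fit: claim the first free block large enough for this payload */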
for (i = 0; i < BLOCKS; i++) {
- if (priv->memory_pool[i].size >= size &&
+ if (priv->memory_pool[i].size >= qtd->length &&
priv->memory_pool[i].free) {
-
priv->memory_pool[i].free = 0;
- return priv->memory_pool[i].start;
+ qtd->payload_addr = priv->memory_pool[i].start;
+ return;
}
}
-
- printk(KERN_ERR "ISP1760 MEM: can not allocate %d bytes of memory\n",
- size);
- printk(KERN_ERR "Current memory map:\n");
- for (i = 0; i < BLOCKS; i++) {
- printk(KERN_ERR "Pool %2d size %4d status: %d\n",
- i, priv->memory_pool[i].size,
- priv->memory_pool[i].free);
- }
- /* XXX maybe -ENOMEM could be possible */
- BUG();
- return 0;
}
-static void free_mem(struct isp1760_hcd *priv, u32 mem)
+static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
int i;
- if (mem == ISP1760_NULL_POINTER)
+ if (!qtd->payload_addr)
return;
for (i = 0; i < BLOCKS; i++) {
- if (priv->memory_pool[i].start == mem) {
-
- BUG_ON(priv->memory_pool[i].free);
-
+ if (priv->memory_pool[i].start == qtd->payload_addr) {
+ WARN_ON(priv->memory_pool[i].free);
priv->memory_pool[i].free = 1;
- return ;
+ qtd->payload_addr = 0;
+ return;
}
}
- printk(KERN_ERR "Trying to free not-here-allocated memory :%08x\n",
- mem);
- BUG();
-}
-
-static void isp1760_init_regs(struct usb_hcd *hcd)
-{
- isp1760_writel(0, hcd->regs + HC_BUFFER_STATUS_REG);
- isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
- isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
- isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ISO_PTD_SKIPMAP_REG);
-
- isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ATL_PTD_DONEMAP_REG);
- isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
- HC_INT_PTD_DONEMAP_REG);
- isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
- HC_ISO_PTD_DONEMAP_REG);
+ dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
+ __func__, qtd->payload_addr);
+ WARN_ON(1);
+ qtd->payload_addr = 0;
}
-static int handshake(struct isp1760_hcd *priv, void __iomem *ptr,
+static int handshake(struct usb_hcd *hcd, u32 reg,
u32 mask, u32 done, int usec)
{
u32 result;
do {
- result = isp1760_readl(ptr);
+ result = reg_read32(hcd->regs, reg);
if (result == ~0)
return -ENODEV;
result &= mask;
@@ -301,57 +370,56 @@ static int handshake(struct isp1760_hcd *priv, void __iomem *ptr,
}
/* reset a non-running (STS_HALT == 1) controller */
-static int ehci_reset(struct isp1760_hcd *priv)
+static int ehci_reset(struct usb_hcd *hcd)
{
int retval;
- struct usb_hcd *hcd = priv_to_hcd(priv);
- u32 command = isp1760_readl(hcd->regs + HC_USBCMD);
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+
+ u32 command = reg_read32(hcd->regs, HC_USBCMD);
command |= CMD_RESET;
- isp1760_writel(command, hcd->regs + HC_USBCMD);
+ reg_write32(hcd->regs, HC_USBCMD, command);
hcd->state = HC_STATE_HALT;
priv->next_statechange = jiffies;
- retval = handshake(priv, hcd->regs + HC_USBCMD,
+ retval = handshake(hcd, HC_USBCMD,
CMD_RESET, 0, 250 * 1000);
return retval;
}
-static void qh_destroy(struct isp1760_qh *qh)
-{
- BUG_ON(!list_empty(&qh->qtd_list));
- kmem_cache_free(qh_cachep, qh);
-}
-
-static struct isp1760_qh *isp1760_qh_alloc(struct isp1760_hcd *priv,
- gfp_t flags)
+static struct isp1760_qh *qh_alloc(gfp_t flags)
{
struct isp1760_qh *qh;
qh = kmem_cache_zalloc(qh_cachep, flags);
if (!qh)
- return qh;
+ return NULL;
+ INIT_LIST_HEAD(&qh->qh_list);
INIT_LIST_HEAD(&qh->qtd_list);
- qh->priv = priv;
+ qh->slot = -1;
+
return qh;
}
-/* magic numbers that can affect system performance */
-#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
-#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
-#define EHCI_TUNE_RL_TT 0
-#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
-#define EHCI_TUNE_MULT_TT 1
-#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
+static void qh_free(struct isp1760_qh *qh)
+{
+ WARN_ON(!list_empty(&qh->qtd_list));
+ WARN_ON(qh->slot > -1);
+ kmem_cache_free(qh_cachep, qh);
+}
/* one-time init, only for memory state */
static int priv_init(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 hcc_params;
+ int i;
spin_lock_init(&priv->lock);
+ for (i = 0; i < QH_END; i++)
+ INIT_LIST_HEAD(&priv->qh_list[i]);
+
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -359,7 +427,7 @@ static int priv_init(struct usb_hcd *hcd)
priv->periodic_size = DEFAULT_I_TDPS;
/* controllers may cache some of the periodic schedule ... */
- hcc_params = isp1760_readl(hcd->regs + HC_HCCPARAMS);
+ hcc_params = reg_read32(hcd->regs, HC_HCCPARAMS);
/* full frame cache */
if (HCC_ISOC_CACHE(hcc_params))
priv->i_thresh = 8;
@@ -375,6 +443,18 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
int result;
u32 scratch, hwmode;
+ /* low-level chip reset */
+ if (gpio_is_valid(priv->rst_gpio)) {
+ unsigned int rst_lvl;
+
+ rst_lvl = (priv->devflags &
+ ISP1760_FLAG_RESET_ACTIVE_HIGH) ? 1 : 0;
+
+ gpio_set_value(priv->rst_gpio, rst_lvl);
+ mdelay(50);
+ gpio_set_value(priv->rst_gpio, !rst_lvl);
+ }
+
/* Setup HW Mode Control: This assumes a level active-low interrupt */
hwmode = HW_DATA_BUS_32BIT;
@@ -386,52 +466,58 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
hwmode |= HW_DACK_POL_HIGH;
if (priv->devflags & ISP1760_FLAG_DREQ_POL_HIGH)
hwmode |= HW_DREQ_POL_HIGH;
+ if (priv->devflags & ISP1760_FLAG_INTR_POL_HIGH)
+ hwmode |= HW_INTR_HIGH_ACT;
+ if (priv->devflags & ISP1760_FLAG_INTR_EDGE_TRIG)
+ hwmode |= HW_INTR_EDGE_TRIG;
/*
* We have to set this first in case we're in 16-bit mode.
* Write it twice to ensure correct upper bits if switching
* to 16-bit mode.
*/
- isp1760_writel(hwmode, hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(hwmode, hcd->regs + HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
- isp1760_writel(0xdeadbabe, hcd->regs + HC_SCRATCH_REG);
+ reg_write32(hcd->regs, HC_SCRATCH_REG, 0xdeadbabe);
/* Change bus pattern */
- scratch = isp1760_readl(hcd->regs + HC_CHIP_ID_REG);
- scratch = isp1760_readl(hcd->regs + HC_SCRATCH_REG);
+ scratch = reg_read32(hcd->regs, HC_CHIP_ID_REG);
+ scratch = reg_read32(hcd->regs, HC_SCRATCH_REG);
if (scratch != 0xdeadbabe) {
- printk(KERN_ERR "ISP1760: Scratch test failed.\n");
+ dev_err(hcd->self.controller, "Scratch test failed.\n");
return -ENODEV;
}
/* pre reset */
- isp1760_init_regs(hcd);
+ reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
+ reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
/* reset */
- isp1760_writel(SW_RESET_RESET_ALL, hcd->regs + HC_RESET_REG);
+ reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
mdelay(100);
- isp1760_writel(SW_RESET_RESET_HC, hcd->regs + HC_RESET_REG);
+ reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_HC);
mdelay(100);
- result = ehci_reset(priv);
+ result = ehci_reset(hcd);
if (result)
return result;
/* Step 11 passed */
- isp1760_info(priv, "bus width: %d, oc: %s\n",
+ dev_info(hcd->self.controller, "bus width: %d, oc: %s\n",
(priv->devflags & ISP1760_FLAG_BUS_WIDTH_16) ?
16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ?
"analog" : "digital");
/* ATL reset */
- isp1760_writel(hwmode | ALL_ATX_RESET, hcd->regs + HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
mdelay(10);
- isp1760_writel(hwmode, hcd->regs + HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
- isp1760_writel(INTERRUPT_ENABLE_MASK, hcd->regs + HC_INTERRUPT_REG);
- isp1760_writel(INTERRUPT_ENABLE_MASK, hcd->regs + HC_INTERRUPT_ENABLE);
+ reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
/*
* PORT 1 Control register of the ISP1760 is the OTG control
@@ -439,1127 +525,958 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
* support in this driver, we use port 1 as a "normal" USB host port on
* both chips.
*/
- isp1760_writel(PORT1_POWER | PORT1_INIT2,
- hcd->regs + HC_PORT1_CTRL);
+ reg_write32(hcd->regs, HC_PORT1_CTRL, PORT1_POWER | PORT1_INIT2);
mdelay(10);
- priv->hcs_params = isp1760_readl(hcd->regs + HC_HCSPARAMS);
+ priv->hcs_params = reg_read32(hcd->regs, HC_HCSPARAMS);
return priv_init(hcd);
}
-static void isp1760_init_maps(struct usb_hcd *hcd)
-{
- /*set last maps, for iso its only 1, else 32 tds bitmap*/
- isp1760_writel(0x80000000, hcd->regs + HC_ATL_PTD_LASTPTD_REG);
- isp1760_writel(0x80000000, hcd->regs + HC_INT_PTD_LASTPTD_REG);
- isp1760_writel(0x00000001, hcd->regs + HC_ISO_PTD_LASTPTD_REG);
-}
-
-static void isp1760_enable_interrupts(struct usb_hcd *hcd)
+static u32 base_to_chip(u32 base)
{
- isp1760_writel(0, hcd->regs + HC_ATL_IRQ_MASK_AND_REG);
- isp1760_writel(0, hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
- isp1760_writel(0, hcd->regs + HC_INT_IRQ_MASK_AND_REG);
- isp1760_writel(0, hcd->regs + HC_INT_IRQ_MASK_OR_REG);
- isp1760_writel(0, hcd->regs + HC_ISO_IRQ_MASK_AND_REG);
- isp1760_writel(0xffffffff, hcd->regs + HC_ISO_IRQ_MASK_OR_REG);
- /* step 23 passed */
+ return ((base - 0x400) >> 3);
}
-static int isp1760_run(struct usb_hcd *hcd)
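+/* Return true if this qtd is the last one belonging to its URB on this qh */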
+static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
{
- struct isp1760_hcd *priv = hcd_to_priv(hcd);
- int retval;
- u32 temp;
- u32 command;
- u32 chipid;
-
- hcd->uses_new_polling = 1;
- hcd->poll_rh = 0;
-
- hcd->state = HC_STATE_RUNNING;
- isp1760_enable_interrupts(hcd);
- temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(temp | HW_GLOBAL_INTR_EN, hcd->regs + HC_HW_MODE_CTRL);
-
- command = isp1760_readl(hcd->regs + HC_USBCMD);
- command &= ~(CMD_LRESET|CMD_RESET);
- command |= CMD_RUN;
- isp1760_writel(command, hcd->regs + HC_USBCMD);
-
- retval = handshake(priv, hcd->regs + HC_USBCMD, CMD_RUN, CMD_RUN,
- 250 * 1000);
- if (retval)
- return retval;
-
- /*
- * XXX
- * Spec says to write FLAG_CF as last config action, priv code grabs
- * the semaphore while doing so.
- */
- down_write(&ehci_cf_port_reset_rwsem);
- isp1760_writel(FLAG_CF, hcd->regs + HC_CONFIGFLAG);
-
- retval = handshake(priv, hcd->regs + HC_CONFIGFLAG, FLAG_CF, FLAG_CF,
- 250 * 1000);
- up_write(&ehci_cf_port_reset_rwsem);
- if (retval)
- return retval;
-
- chipid = isp1760_readl(hcd->regs + HC_CHIP_ID_REG);
- isp1760_info(priv, "USB ISP %04x HW rev. %d started\n", chipid & 0xffff,
- chipid >> 16);
+ struct urb *urb;
- /* PTD Register Init Part 2, Step 28 */
- /* enable INTs */
- isp1760_init_maps(hcd);
+ if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
+ return 1;
- /* GRR this is run-once init(), being done every time the HC starts.
- * So long as they're part of class devices, we can't do it init()
- * since the class device isn't created that early.
- */
- return 0;
+ urb = qtd->urb;
+ qtd = list_entry(qtd->qtd_list.next, typeof(*qtd), qtd_list);
+ return (qtd->urb != urb);
}
-static u32 base_to_chip(u32 base)
-{
- return ((base - 0x400) >> 3);
-}
+/* magic numbers that can affect system performance */
+#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define EHCI_TUNE_RL_TT 0
+#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define EHCI_TUNE_MULT_TT 1
+#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
-static void transform_into_atl(struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd, struct urb *urb,
- u32 payload, struct ptd *ptd)
+static void create_ptd_atl(struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd, struct ptd *ptd)
{
- u32 dw0;
- u32 dw1;
- u32 dw2;
- u32 dw3;
u32 maxpacket;
u32 multi;
- u32 pid_code;
u32 rl = RL_COUNTER;
u32 nak = NAK_COUNTER;
+ memset(ptd, 0, sizeof(*ptd));
+
+ /* according to 3.6.2, max packet len cannot be > 0x400 */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+ maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
+ usb_pipeout(qtd->urb->pipe));
multi = 1 + ((maxpacket >> 11) & 0x3);
maxpacket &= 0x7ff;
/* DW0 */
- dw0 = PTD_VALID;
- dw0 |= PTD_LENGTH(qtd->length);
- dw0 |= PTD_MAXPACKET(maxpacket);
- dw0 |= PTD_ENDPOINT(usb_pipeendpoint(urb->pipe));
- dw1 = usb_pipeendpoint(urb->pipe) >> 1;
+ ptd->dw0 = DW0_VALID_BIT;
+ ptd->dw0 |= TO_DW0_LENGTH(qtd->length);
+ ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket);
+ ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
/* DW1 */
- dw1 |= PTD_DEVICE_ADDR(usb_pipedevice(urb->pipe));
+ ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
+ ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
+ ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);
- pid_code = qtd->packet_type;
- dw1 |= PTD_PID_TOKEN(pid_code);
+ if (usb_pipebulk(qtd->urb->pipe))
+ ptd->dw1 |= DW1_TRANS_BULK;
+ else if (usb_pipeint(qtd->urb->pipe))
+ ptd->dw1 |= DW1_TRANS_INT;
- if (usb_pipebulk(urb->pipe))
- dw1 |= PTD_TRANS_BULK;
- else if (usb_pipeint(urb->pipe))
- dw1 |= PTD_TRANS_INT;
-
- if (urb->dev->speed != USB_SPEED_HIGH) {
+ if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
/* split transaction */
- dw1 |= PTD_TRANS_SPLIT;
- if (urb->dev->speed == USB_SPEED_LOW)
- dw1 |= PTD_SE_USB_LOSPEED;
+ ptd->dw1 |= DW1_TRANS_SPLIT;
+ if (qtd->urb->dev->speed == USB_SPEED_LOW)
+ ptd->dw1 |= DW1_SE_USB_LOSPEED;
- dw1 |= PTD_PORT_NUM(urb->dev->ttport);
- dw1 |= PTD_HUB_NUM(urb->dev->tt->hub->devnum);
+ ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport);
+ ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum);
/* SE bit for Split INT transfers */
- if (usb_pipeint(urb->pipe) &&
- (urb->dev->speed == USB_SPEED_LOW))
- dw1 |= 2 << 16;
+ if (usb_pipeint(qtd->urb->pipe) &&
+ (qtd->urb->dev->speed == USB_SPEED_LOW))
+ ptd->dw1 |= 2 << 16;
- dw3 = 0;
rl = 0;
nak = 0;
} else {
- dw0 |= PTD_MULTI(multi);
- if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe))
- dw3 = qh->ping;
- else
- dw3 = 0;
+ ptd->dw0 |= TO_DW0_MULTI(multi);
+ if (usb_pipecontrol(qtd->urb->pipe) ||
+ usb_pipebulk(qtd->urb->pipe))
+ ptd->dw3 |= TO_DW3_PING(qh->ping);
}
/* DW2 */
- dw2 = 0;
- dw2 |= PTD_DATA_START_ADDR(base_to_chip(payload));
- dw2 |= PTD_RL_CNT(rl);
- dw3 |= PTD_NAC_CNT(nak);
+ ptd->dw2 = 0;
+ ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
+ ptd->dw2 |= TO_DW2_RL(rl);
/* DW3 */
- if (usb_pipecontrol(urb->pipe))
- dw3 |= PTD_DATA_TOGGLE(qtd->toggle);
- else
- dw3 |= qh->toggle;
-
+ ptd->dw3 |= TO_DW3_NAKCOUNT(nak);
+ ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle);
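+ /* Control transfers: SETUP always uses DATA0, the STATUS stage DATA1 */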
+ if (usb_pipecontrol(qtd->urb->pipe)) {
+ if (qtd->data_buffer == qtd->urb->setup_packet)
+ ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1);
+ else if (last_qtd_of_urb(qtd, qh))
+ ptd->dw3 |= TO_DW3_DATA_TOGGLE(1);
+ }
- dw3 |= PTD_ACTIVE;
+ ptd->dw3 |= DW3_ACTIVE_BIT;
/* Cerr */
- dw3 |= PTD_CERR(ERR_COUNTER);
-
- memset(ptd, 0, sizeof(*ptd));
-
- ptd->dw0 = cpu_to_le32(dw0);
- ptd->dw1 = cpu_to_le32(dw1);
- ptd->dw2 = cpu_to_le32(dw2);
- ptd->dw3 = cpu_to_le32(dw3);
+ ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER);
}
-static void transform_add_int(struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd, struct urb *urb,
- u32 payload, struct ptd *ptd)
+static void transform_add_int(struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd, struct ptd *ptd)
{
- u32 maxpacket;
- u32 multi;
- u32 numberofusofs;
- u32 i;
- u32 usofmask, usof;
+ u32 usof;
u32 period;
- maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
- multi = 1 + ((maxpacket >> 11) & 0x3);
- maxpacket &= 0x7ff;
- /* length of the data per uframe */
- maxpacket = multi * maxpacket;
-
- numberofusofs = urb->transfer_buffer_length / maxpacket;
- if (urb->transfer_buffer_length % maxpacket)
- numberofusofs += 1;
-
- usofmask = 1;
- usof = 0;
- for (i = 0; i < numberofusofs; i++) {
- usof |= usofmask;
- usofmask <<= 1;
- }
-
- if (urb->dev->speed != USB_SPEED_HIGH) {
- /* split */
- ptd->dw5 = __constant_cpu_to_le32(0x1c);
+ /*
+ * Most of this is guessing. ISP1761 datasheet is quite unclear, and
+ * the algorithm from the original Philips driver code, which was
+ * pretty much used in this driver before as well, is quite horrendous
+ * and, I believe, incorrect. The code below follows the datasheet and
+ * USB2.0 spec as far as I can tell, and plug/unplug seems to be much
+ * more reliable this way (fingers crossed...).
+ */
- if (qh->period >= 32)
- period = qh->period / 2;
+ if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
+ /* urb->interval is in units of microframes (1/8 ms) */
+ period = qtd->urb->interval >> 3;
+
+ if (qtd->urb->interval > 4)
+ usof = 0x01; /* One bit set =>
+ interval 1 ms * uFrame-match */
+ else if (qtd->urb->interval > 2)
+ usof = 0x22; /* Two bits set => interval 1/2 ms */
+ else if (qtd->urb->interval > 1)
+ usof = 0x55; /* Four bits set => interval 1/4 ms */
else
- period = qh->period;
-
+ usof = 0xff; /* All bits set => interval 1/8 ms */
} else {
+ /* urb->interval is in units of frames (1 ms) */
+ period = qtd->urb->interval;
+ usof = 0x0f; /* Execute Start Split on any of the
+ four first uFrames */
- if (qh->period >= 8)
- period = qh->period/8;
- else
- period = qh->period;
-
- if (period >= 32)
- period = 16;
-
- if (qh->period >= 8) {
- /* millisecond period */
- period = (period << 3);
- } else {
- /* usof based tranmsfers */
- /* minimum 4 usofs */
- usof = 0x11;
- }
+ /*
+ * The first 8 bits in dw5 are the uSCS field, which "specifies which
+ * uSOF the complete split needs to be sent. Valid only for IN." Also,
+ * "All bits can be set to one for every transfer." (p 82,
+ * ISP1761 data sheet.) 0x1c is from the Philips driver. Where did
+ * that number come from? 0xff seems to work fine...
+ */
+ /* ptd->dw5 = 0x1c; */
+ ptd->dw5 = 0xff; /* Execute Complete Split on any uFrame */
}
- ptd->dw2 |= cpu_to_le32(period);
- ptd->dw4 = cpu_to_le32(usof);
-}
+ period = period >> 1; /* Ensure equal or shorter period than requested */
+ period &= 0xf8; /* Mask off too large values and lowest unused 3 bits */
-static void transform_into_int(struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd, struct urb *urb,
- u32 payload, struct ptd *ptd)
-{
- transform_into_atl(priv, qh, qtd, urb, payload, ptd);
- transform_add_int(priv, qh, qtd, urb, payload, ptd);
+ ptd->dw2 |= period;
+ ptd->dw4 = usof;
}
-static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len,
- u32 token)
+static void create_ptd_int(struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd, struct ptd *ptd)
{
- int count;
-
- qtd->data_buffer = databuffer;
- qtd->packet_type = GET_QTD_TOKEN_TYPE(token);
- qtd->toggle = GET_DATA_TOGGLE(token);
-
- if (len > HC_ATL_PL_SIZE)
- count = HC_ATL_PL_SIZE;
- else
- count = len;
-
- qtd->length = count;
- return count;
+ create_ptd_atl(qh, qtd, ptd);
+ transform_add_int(qh, qtd, ptd);
}
-static int check_error(struct ptd *ptd)
+static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
+__releases(priv->lock)
+__acquires(priv->lock)
{
- int error = 0;
- u32 dw3;
-
- dw3 = le32_to_cpu(ptd->dw3);
- if (dw3 & DW3_HALT_BIT)
- error = -EPIPE;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
- if (dw3 & DW3_ERROR_BIT) {
- printk(KERN_ERR "error bit is set in DW3\n");
- error = -EPIPE;
+ if (!urb->unlinked) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = 0;
}
- if (dw3 & DW3_QTD_ACTIVE) {
- printk(KERN_ERR "transfer active bit is set DW3\n");
- printk(KERN_ERR "nak counter: %d, rl: %d\n", (dw3 >> 19) & 0xf,
- (le32_to_cpu(ptd->dw2) >> 25) & 0xf);
+ if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+ void *ptr;
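+ /* The payload was copied in by the CPU; flush the cache so other
+ mappings of these pages see the freshly written data. */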
+ for (ptr = urb->transfer_buffer;
+ ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+ ptr += PAGE_SIZE)
+ flush_dcache_page(virt_to_page(ptr));
}
- return error;
-}
-
-static void check_int_err_status(u32 dw4)
-{
- u32 i;
-
- dw4 >>= 8;
-
- for (i = 0; i < 8; i++) {
- switch (dw4 & 0x7) {
- case INT_UNDERRUN:
- printk(KERN_ERR "ERROR: under run , %d\n", i);
- break;
-
- case INT_EXACT:
- printk(KERN_ERR "ERROR: transaction error, %d\n", i);
- break;
-
- case INT_BABBLE:
- printk(KERN_ERR "ERROR: babble error, %d\n", i);
- break;
- }
- dw4 >>= 3;
- }
+ /* complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock(&priv->lock);
+ usb_hcd_giveback_urb(hcd, urb, urb->status);
+ spin_lock(&priv->lock);
}
-static void enqueue_one_qtd(struct isp1760_qtd *qtd, struct isp1760_hcd *priv,
- u32 payload)
+static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb,
+ u8 packet_type)
{
- u32 token;
- struct usb_hcd *hcd = priv_to_hcd(priv);
+ struct isp1760_qtd *qtd;
- token = qtd->packet_type;
+ qtd = kmem_cache_zalloc(qtd_cachep, flags);
+ if (!qtd)
+ return NULL;
- if (qtd->length && (qtd->length <= HC_ATL_PL_SIZE)) {
- switch (token) {
- case IN_PID:
- break;
- case OUT_PID:
- case SETUP_PID:
- priv_write_copy(priv, qtd->data_buffer,
- hcd->regs + payload,
- qtd->length);
- }
- }
-}
+ INIT_LIST_HEAD(&qtd->qtd_list);
+ qtd->urb = urb;
+ qtd->packet_type = packet_type;
+ qtd->status = QTD_ENQUEUED;
+ qtd->actual_length = 0;
-static void enqueue_one_atl_qtd(u32 atl_regs, u32 payload,
- struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct urb *urb, u32 slot, struct isp1760_qtd *qtd)
-{
- struct ptd ptd;
- struct usb_hcd *hcd = priv_to_hcd(priv);
-
- transform_into_atl(priv, qh, qtd, urb, payload, &ptd);
- priv_write_copy(priv, (u32 *)&ptd, hcd->regs + atl_regs, sizeof(ptd));
- enqueue_one_qtd(qtd, priv, payload);
-
- priv->atl_ints[slot].urb = urb;
- priv->atl_ints[slot].qh = qh;
- priv->atl_ints[slot].qtd = qtd;
- priv->atl_ints[slot].data_buffer = qtd->data_buffer;
- priv->atl_ints[slot].payload = payload;
- qtd->status |= URB_ENQUEUED | URB_TYPE_ATL;
- qtd->status |= slot << 16;
+ return qtd;
}
-static void enqueue_one_int_qtd(u32 int_regs, u32 payload,
- struct isp1760_hcd *priv, struct isp1760_qh *qh,
- struct urb *urb, u32 slot, struct isp1760_qtd *qtd)
+static void qtd_free(struct isp1760_qtd *qtd)
{
- struct ptd ptd;
- struct usb_hcd *hcd = priv_to_hcd(priv);
-
- transform_into_int(priv, qh, qtd, urb, payload, &ptd);
- priv_write_copy(priv, (u32 *)&ptd, hcd->regs + int_regs, sizeof(ptd));
- enqueue_one_qtd(qtd, priv, payload);
-
- priv->int_ints[slot].urb = urb;
- priv->int_ints[slot].qh = qh;
- priv->int_ints[slot].qtd = qtd;
- priv->int_ints[slot].data_buffer = qtd->data_buffer;
- priv->int_ints[slot].payload = payload;
- qtd->status |= URB_ENQUEUED | URB_TYPE_INT;
- qtd->status |= slot << 16;
+ WARN_ON(qtd->payload_addr);
+ kmem_cache_free(qtd_cachep, qtd);
}
-static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd)
+static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
+ struct slotinfo *slots, struct isp1760_qtd *qtd,
+ struct isp1760_qh *qh, struct ptd *ptd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 skip_map, or_map;
- u32 queue_entry;
- u32 slot;
- u32 atl_regs, payload;
- u32 buffstatus;
-
- skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
-
- BUG_ON(!skip_map);
- slot = __ffs(skip_map);
- queue_entry = 1 << slot;
-
- atl_regs = ATL_REGS_OFFSET + slot * sizeof(struct ptd);
-
- payload = alloc_mem(priv, qtd->length);
-
- enqueue_one_atl_qtd(atl_regs, payload, priv, qh, qtd->urb, slot, qtd);
-
- or_map = isp1760_readl(hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
- or_map |= queue_entry;
- isp1760_writel(or_map, hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
-
- skip_map &= ~queue_entry;
- isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
+ int skip_map;
+
+ WARN_ON((slot < 0) || (slot > 31));
+ WARN_ON(qtd->length && !qtd->payload_addr);
+ WARN_ON(slots[slot].qtd);
+ WARN_ON(slots[slot].qh);
+ WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
+
+ /* Make sure done map has not triggered from some unlinked transfer */
+ if (ptd_offset == ATL_PTD_OFFSET) {
+ priv->atl_done_map |= reg_read32(hcd->regs,
+ HC_ATL_PTD_DONEMAP_REG);
+ priv->atl_done_map &= ~(1 << slot);
+ } else {
+ priv->int_done_map |= reg_read32(hcd->regs,
+ HC_INT_PTD_DONEMAP_REG);
+ priv->int_done_map &= ~(1 << slot);
+ }
- buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG);
- buffstatus |= ATL_BUFFER;
- isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG);
+ qh->slot = slot;
+ qtd->status = QTD_XFER_STARTED;
+ slots[slot].timestamp = jiffies;
+ slots[slot].qtd = qtd;
+ slots[slot].qh = qh;
+ ptd_write(hcd->regs, ptd_offset, slot, ptd);
+
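+ /* Clearing the slot's skip map bit below hands the PTD to the HC */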
+ if (ptd_offset == ATL_PTD_OFFSET) {
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
+ skip_map &= ~(1 << qh->slot);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
+ } else {
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
+ skip_map &= ~(1 << qh->slot);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
+ }
}
-static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
- struct isp1760_qtd *qtd)
+static int is_short_bulk(struct isp1760_qtd *qtd)
{
- struct isp1760_hcd *priv = hcd_to_priv(hcd);
- u32 skip_map, or_map;
- u32 queue_entry;
- u32 slot;
- u32 int_regs, payload;
- u32 buffstatus;
-
- skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
-
- BUG_ON(!skip_map);
- slot = __ffs(skip_map);
- queue_entry = 1 << slot;
-
- int_regs = INT_REGS_OFFSET + slot * sizeof(struct ptd);
-
- payload = alloc_mem(priv, qtd->length);
-
- enqueue_one_int_qtd(int_regs, payload, priv, qh, qtd->urb, slot, qtd);
-
- or_map = isp1760_readl(hcd->regs + HC_INT_IRQ_MASK_OR_REG);
- or_map |= queue_entry;
- isp1760_writel(or_map, hcd->regs + HC_INT_IRQ_MASK_OR_REG);
-
- skip_map &= ~queue_entry;
- isp1760_writel(skip_map, hcd->regs + HC_INT_PTD_SKIPMAP_REG);
-
- buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG);
- buffstatus |= INT_BUFFER;
- isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG);
+ return (usb_pipebulk(qtd->urb->pipe) &&
+ (qtd->actual_length < qtd->length));
}
-static void isp1760_urb_done(struct isp1760_hcd *priv, struct urb *urb, int status)
-__releases(priv->lock)
-__acquires(priv->lock)
+static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
+ struct list_head *urb_list)
{
- if (!urb->unlinked) {
- if (status == -EINPROGRESS)
- status = 0;
- }
-
- /* complete() can reenter this HCD */
- usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
- spin_unlock(&priv->lock);
- usb_hcd_giveback_urb(priv_to_hcd(priv), urb, status);
- spin_lock(&priv->lock);
-}
+ int last_qtd;
+ struct isp1760_qtd *qtd, *qtd_next;
+ struct urb_listitem *urb_listitem;
-static void isp1760_qtd_free(struct isp1760_qtd *qtd)
-{
- kmem_cache_free(qtd_cachep, qtd);
-}
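+ /* Walk this qh's qtds in queue order, stopping at the first one that
+ has not completed yet */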
+ list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
+ if (qtd->status < QTD_XFER_COMPLETE)
+ break;
-static struct isp1760_qtd *clean_this_qtd(struct isp1760_qtd *qtd)
-{
- struct isp1760_qtd *tmp_qtd;
+ last_qtd = last_qtd_of_urb(qtd, qh);
+
+ if ((!last_qtd) && (qtd->status == QTD_RETIRE))
+ qtd_next->status = QTD_RETIRE;
+
+ if (qtd->status == QTD_XFER_COMPLETE) {
+ if (qtd->actual_length) {
+ switch (qtd->packet_type) {
+ case IN_PID:
+ mem_reads8(hcd->regs, qtd->payload_addr,
+ qtd->data_buffer,
+ qtd->actual_length);
+ /* Fall through (?) */
+ case OUT_PID:
+ qtd->urb->actual_length +=
+ qtd->actual_length;
+ /* Fall through ... */
+ case SETUP_PID:
+ break;
+ }
+ }
- tmp_qtd = qtd->hw_next;
- list_del(&qtd->qtd_list);
- isp1760_qtd_free(qtd);
- return tmp_qtd;
-}
+ if (is_short_bulk(qtd)) {
+ if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK)
+ qtd->urb->status = -EREMOTEIO;
+ if (!last_qtd)
+ qtd_next->status = QTD_RETIRE;
+ }
+ }
-/*
- * Remove this QTD from the QH list and free its memory. If this QTD
- * isn't the last one than remove also his successor(s).
- * Returns the QTD which is part of an new URB and should be enqueued.
- */
-static struct isp1760_qtd *clean_up_qtdlist(struct isp1760_qtd *qtd)
-{
- struct isp1760_qtd *tmp_qtd;
- int last_one;
+ if (qtd->payload_addr)
+ free_mem(hcd, qtd);
+
+ if (last_qtd) {
+ if ((qtd->status == QTD_RETIRE) &&
+ (qtd->urb->status == -EINPROGRESS))
+ qtd->urb->status = -EPIPE;
+ /* Defer calling of urb_done() since it releases lock */
+ urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
+ GFP_ATOMIC);
+ if (unlikely(!urb_listitem))
+ break; /* Try again on next call */
+ urb_listitem->urb = qtd->urb;
+ list_add_tail(&urb_listitem->urb_list, urb_list);
+ }
- do {
- tmp_qtd = qtd->hw_next;
- last_one = qtd->status & URB_COMPLETE_NOTIFY;
list_del(&qtd->qtd_list);
- isp1760_qtd_free(qtd);
- qtd = tmp_qtd;
- } while (!last_one && qtd);
-
- return qtd;
+ qtd_free(qtd);
+ }
}
-static void do_atl_int(struct usb_hcd *usb_hcd)
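+/* Per-endpoint queue depth: qtds given payload memory per scheduling pass */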
+#define ENQUEUE_DEPTH 2
+static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
- u32 done_map, skip_map;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ int ptd_offset;
+ struct slotinfo *slots;
+ int curr_slot, free_slot;
+ int n;
struct ptd ptd;
- struct urb *urb = NULL;
- u32 atl_regs_base;
- u32 atl_regs;
- u32 queue_entry;
- u32 payload;
- u32 length;
- u32 or_map;
- u32 status = -EINVAL;
- int error;
struct isp1760_qtd *qtd;
- struct isp1760_qh *qh;
- u32 rl;
- u32 nakcount;
-
- done_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_PTD_DONEMAP_REG);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
- or_map = isp1760_readl(usb_hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
- or_map &= ~done_map;
- isp1760_writel(or_map, usb_hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
+ if (unlikely(list_empty(&qh->qtd_list))) {
+ WARN_ON(1);
+ return;
+ }
- atl_regs_base = ATL_REGS_OFFSET;
- while (done_map) {
- u32 dw1;
- u32 dw2;
- u32 dw3;
+ /* Make sure this endpoint's TT buffer is clean before queueing ptds */
+ if (qh->tt_buffer_dirty)
+ return;
- status = 0;
+ if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
+ qtd_list)->urb->pipe)) {
+ ptd_offset = INT_PTD_OFFSET;
+ slots = priv->int_slots;
+ } else {
+ ptd_offset = ATL_PTD_OFFSET;
+ slots = priv->atl_slots;
+ }
- queue_entry = __ffs(done_map);
- done_map &= ~(1 << queue_entry);
- skip_map |= 1 << queue_entry;
+ free_slot = -1;
+ for (curr_slot = 0; curr_slot < 32; curr_slot++) {
+ if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
+ free_slot = curr_slot;
+ if (slots[curr_slot].qh == qh)
+ break;
+ }
- atl_regs = atl_regs_base + queue_entry * sizeof(struct ptd);
+ n = 0;
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
+ if (qtd->status == QTD_ENQUEUED) {
+ WARN_ON(qtd->payload_addr);
+ alloc_mem(hcd, qtd);
+ if ((qtd->length) && (!qtd->payload_addr))
+ break;
- urb = priv->atl_ints[queue_entry].urb;
- qtd = priv->atl_ints[queue_entry].qtd;
- qh = priv->atl_ints[queue_entry].qh;
- payload = priv->atl_ints[queue_entry].payload;
+ if ((qtd->length) &&
+ ((qtd->packet_type == SETUP_PID) ||
+ (qtd->packet_type == OUT_PID))) {
+ mem_writes8(hcd->regs, qtd->payload_addr,
+ qtd->data_buffer, qtd->length);
+ }
- if (!qh) {
- printk(KERN_ERR "qh is 0\n");
- continue;
+ qtd->status = QTD_PAYLOAD_ALLOC;
}
- isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs +
- HC_MEMORY_REG);
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
- /*
- * write bank1 address twice to ensure the 90ns delay (time
- * between BANK0 write and the priv_read_copy() call is at
- * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
- */
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
-
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs +
- ISP_BANK(0), sizeof(ptd));
- dw1 = le32_to_cpu(ptd.dw1);
- dw2 = le32_to_cpu(ptd.dw2);
- dw3 = le32_to_cpu(ptd.dw3);
- rl = (dw2 >> 25) & 0x0f;
- nakcount = (dw3 >> 19) & 0xf;
-
- /* Transfer Error, *but* active and no HALT -> reload */
- if ((dw3 & DW3_ERROR_BIT) && (dw3 & DW3_QTD_ACTIVE) &&
- !(dw3 & DW3_HALT_BIT)) {
-
- /* according to ppriv code, we have to
- * reload this one if trasfered bytes != requested bytes
- * else act like everything went smooth..
- * XXX This just doesn't feel right and hasn't
- * triggered so far.
- */
+ if (qtd->status == QTD_PAYLOAD_ALLOC) {
+/*
+ if ((curr_slot > 31) && (free_slot == -1))
+ dev_dbg(hcd->self.controller, "%s: No slot "
+ "available for transfer\n", __func__);
+*/
+ /* Start xfer for this endpoint if not already done */
+ if ((curr_slot > 31) && (free_slot > -1)) {
+ if (usb_pipeint(qtd->urb->pipe))
+ create_ptd_int(qh, qtd, &ptd);
+ else
+ create_ptd_atl(qh, qtd, &ptd);
+
+ start_bus_transfer(hcd, ptd_offset, free_slot,
+ slots, qtd, qh, &ptd);
+ curr_slot = free_slot;
+ }
- length = PTD_XFERRED_LENGTH(dw3);
- printk(KERN_ERR "Should reload now.... transfered %d "
- "of %zu\n", length, qtd->length);
- BUG();
+ n++;
+ if (n >= ENQUEUE_DEPTH)
+ break;
}
+ }
+}
- if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) {
- u32 buffstatus;
+static void schedule_ptds(struct usb_hcd *hcd)
+{
+ struct isp1760_hcd *priv;
+ struct isp1760_qh *qh, *qh_next;
+ struct list_head *ep_queue;
+ LIST_HEAD(urb_list);
+ struct urb_listitem *urb_listitem, *urb_listitem_next;
+ int i;
- /* XXX
- * NAKs are handled in HW by the chip. Usually if the
- * device is not able to send data fast enough.
- * This did not trigger for a long time now.
- */
- printk(KERN_ERR "Reloading ptd %p/%p... qh %p readed: "
- "%d of %zu done: %08x cur: %08x\n", qtd,
- urb, qh, PTD_XFERRED_LENGTH(dw3),
- qtd->length, done_map,
- (1 << queue_entry));
+ if (!hcd) {
+ WARN_ON(1);
+ return;
+ }
- /* RL counter = ERR counter */
- dw3 &= ~(0xf << 19);
- dw3 |= rl << 19;
- dw3 &= ~(3 << (55 - 32));
- dw3 |= ERR_COUNTER << (55 - 32);
-
- /*
- * It is not needed to write skip map back because it
- * is unchanged. Just make sure that this entry is
- * unskipped once it gets written to the HW.
- */
- skip_map &= ~(1 << queue_entry);
- or_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_IRQ_MASK_OR_REG);
- or_map |= 1 << queue_entry;
- isp1760_writel(or_map, usb_hcd->regs +
- HC_ATL_IRQ_MASK_OR_REG);
-
- ptd.dw3 = cpu_to_le32(dw3);
- priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs +
- atl_regs, sizeof(ptd));
-
- ptd.dw0 |= __constant_cpu_to_le32(PTD_VALID);
- priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs +
- atl_regs, sizeof(ptd));
-
- buffstatus = isp1760_readl(usb_hcd->regs +
- HC_BUFFER_STATUS_REG);
- buffstatus |= ATL_BUFFER;
- isp1760_writel(buffstatus, usb_hcd->regs +
- HC_BUFFER_STATUS_REG);
- continue;
- }
+ priv = hcd_to_priv(hcd);
- error = check_error(&ptd);
- if (error) {
- status = error;
- priv->atl_ints[queue_entry].qh->toggle = 0;
- priv->atl_ints[queue_entry].qh->ping = 0;
- urb->status = -EPIPE;
-
-#if 0
- printk(KERN_ERR "Error in %s().\n", __func__);
- printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x "
- "dw3: %08x dw4: %08x dw5: %08x dw6: "
- "%08x dw7: %08x\n",
- ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3,
- ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
-#endif
- } else {
- if (usb_pipetype(urb->pipe) == PIPE_BULK) {
- priv->atl_ints[queue_entry].qh->toggle = dw3 &
- (1 << 25);
- priv->atl_ints[queue_entry].qh->ping = dw3 &
- (1 << 26);
- }
+ /*
+ * check finished/retired xfers, transfer payloads, call urb_done()
+ */
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
+ list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
+ collect_qtds(hcd, qh, &urb_list);
+ if (list_empty(&qh->qtd_list))
+ list_del(&qh->qh_list);
}
+ }
- length = PTD_XFERRED_LENGTH(dw3);
- if (length) {
- switch (DW1_GET_PID(dw1)) {
- case IN_PID:
- priv_read_copy(priv,
- priv->atl_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload + ISP_BANK(1),
- length);
-
- case OUT_PID:
-
- urb->actual_length += length;
+ list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
+ urb_list) {
+ isp1760_urb_done(hcd, urb_listitem->urb);
+ kmem_cache_free(urb_listitem_cachep, urb_listitem);
+ }
- case SETUP_PID:
- break;
- }
- }
+ /*
+ * Schedule packets for transfer.
+ *
+ * According to USB2.0 specification:
+ *
+ * 1st prio: interrupt xfers, up to 80 % of bandwidth
+ * 2nd prio: control xfers
+ * 3rd prio: bulk xfers
+ *
+ * ... but let's use a simpler scheme here (mostly because ISP1761 doc
+ * is very unclear on how to prioritize traffic):
+ *
+ * 1) Enqueue any queued control transfers, as long as payload chip mem
+ * and PTD ATL slots are available.
+ * 2) Enqueue any queued INT transfers, as long as payload chip mem
+ * and PTD INT slots are available.
+ * 3) Enqueue any queued bulk transfers, as long as payload chip mem
+ * and PTD ATL slots are available.
+ *
+ * Use double buffering (ENQUEUE_DEPTH==2) as a compromise between
+ * conservation of chip mem and performance.
+ *
+ * I'm sure this scheme could be improved upon!
+ */
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
+ list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
+ enqueue_qtds(hcd, qh);
+ }
+}
- priv->atl_ints[queue_entry].data_buffer = NULL;
- priv->atl_ints[queue_entry].urb = NULL;
- priv->atl_ints[queue_entry].qtd = NULL;
- priv->atl_ints[queue_entry].qh = NULL;
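+/* Outcome of examining a completed PTD: qtd finished, PTD must be reloaded,
+ or the whole URB has to be retired with an error status */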
+#define PTD_STATE_QTD_DONE 1
+#define PTD_STATE_QTD_RELOAD 2
+#define PTD_STATE_URB_RETIRE 3
- free_mem(priv, payload);
+static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
+ struct urb *urb)
+{
+ __dw dw4;
+ int i;
- isp1760_writel(skip_map, usb_hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
+ dw4 = ptd->dw4;
+ dw4 >>= 8;
- if (urb->status == -EPIPE) {
- /* HALT was received */
+ /* FIXME: ISP1761 datasheet does not say what to do with these. Do we
+ need to handle these errors? Is it done in hardware? */
- qtd = clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ if (ptd->dw3 & DW3_HALT_BIT) {
- } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
- /* short BULK received */
+ urb->status = -EPROTO; /* Default unknown error */
- if (urb->transfer_flags & URB_SHORT_NOT_OK) {
- urb->status = -EREMOTEIO;
- isp1760_dbg(priv, "short bulk, %d instead %zu "
- "with URB_SHORT_NOT_OK flag.\n",
- length, qtd->length);
+ for (i = 0; i < 8; i++) {
+ switch (dw4 & 0x7) {
+ case INT_UNDERRUN:
+ dev_dbg(hcd->self.controller, "%s: underrun "
+ "during uFrame %d\n",
+ __func__, i);
+ urb->status = -ECOMM; /* Could not write data */
+ break;
+ case INT_EXACT:
+ dev_dbg(hcd->self.controller, "%s: transaction "
+ "error during uFrame %d\n",
+ __func__, i);
+ urb->status = -EPROTO; /* timeout, bad CRC, PID
+ error etc. */
+ break;
+ case INT_BABBLE:
+ dev_dbg(hcd->self.controller, "%s: babble "
+ "error during uFrame %d\n",
+ __func__, i);
+ urb->status = -EOVERFLOW;
+ break;
}
+ dw4 >>= 3;
+ }
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
-
- qtd = clean_up_qtdlist(qtd);
-
- isp1760_urb_done(priv, urb, urb->status);
-
- } else if (qtd->status & URB_COMPLETE_NOTIFY) {
- /* that was the last qtd of that URB */
-
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
-
- qtd = clean_this_qtd(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ return PTD_STATE_URB_RETIRE;
+ }
- } else {
- /* next QTD of this URB */
+ return PTD_STATE_QTD_DONE;
+}
- qtd = clean_this_qtd(qtd);
- BUG_ON(!qtd);
- }
+static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
+ struct urb *urb)
+{
+ WARN_ON(!ptd);
+ if (ptd->dw3 & DW3_HALT_BIT) {
+ if (ptd->dw3 & DW3_BABBLE_BIT)
+ urb->status = -EOVERFLOW;
+ else if (FROM_DW3_CERR(ptd->dw3))
+ urb->status = -EPIPE; /* Stall */
+ else if (ptd->dw3 & DW3_ERROR_BIT)
+ urb->status = -EPROTO; /* XactErr */
+ else
+ urb->status = -EPROTO; /* Unknown */
+/*
+ dev_dbg(hcd->self.controller, "%s: ptd error:\n"
+ " dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n"
+ " dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n",
+ __func__,
+ ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3,
+ ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7);
+*/
+ return PTD_STATE_URB_RETIRE;
+ }
- if (qtd)
- enqueue_an_ATL_packet(usb_hcd, qh, qtd);
+ if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
+ /* Transfer Error, *but* active and no HALT -> reload */
+ dev_dbg(hcd->self.controller, "PID error; reloading ptd\n");
+ return PTD_STATE_QTD_RELOAD;
+ }
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_ATL_PTD_SKIPMAP_REG);
+ if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
+ /*
+ * NAKs are handled in HW by the chip, usually when the
+ * device is not able to send data fast enough.
+ * This happens mostly on slower hardware.
+ */
+ return PTD_STATE_QTD_RELOAD;
}
+
+ return PTD_STATE_QTD_DONE;
}
-static void do_intl_int(struct usb_hcd *usb_hcd)
+static void handle_done_ptds(struct usb_hcd *hcd)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
- u32 done_map, skip_map;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct ptd ptd;
- struct urb *urb = NULL;
- u32 int_regs;
- u32 int_regs_base;
- u32 payload;
- u32 length;
- u32 or_map;
- int error;
- u32 queue_entry;
- struct isp1760_qtd *qtd;
struct isp1760_qh *qh;
+ int slot;
+ int state;
+ struct slotinfo *slots;
+ u32 ptd_offset;
+ struct isp1760_qtd *qtd;
+ int modified;
+ int skip_map;
+
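+ /* Ignore done bits for slots that are currently skipped, e.g. from
+ unlinked transfers */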
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
+ priv->int_done_map &= ~skip_map;
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
+ priv->atl_done_map &= ~skip_map;
+
+ modified = priv->int_done_map || priv->atl_done_map;
+
+ while (priv->int_done_map || priv->atl_done_map) {
+ if (priv->int_done_map) {
+ /* INT ptd */
+ slot = __ffs(priv->int_done_map);
+ priv->int_done_map &= ~(1 << slot);
+ slots = priv->int_slots;
+ /* This should not trigger, and could be removed if
+ no one has any problems with it triggering: */
+ if (!slots[slot].qh) {
+ WARN_ON(1);
+ continue;
+ }
+ ptd_offset = INT_PTD_OFFSET;
+ ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
+ state = check_int_transfer(hcd, &ptd,
+ slots[slot].qtd->urb);
+ } else {
+ /* ATL ptd */
+ slot = __ffs(priv->atl_done_map);
+ priv->atl_done_map &= ~(1 << slot);
+ slots = priv->atl_slots;
+ /* This should not trigger, and could be removed if
+ no one has any problems with it triggering: */
+ if (!slots[slot].qh) {
+ WARN_ON(1);
+ continue;
+ }
+ ptd_offset = ATL_PTD_OFFSET;
+ ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
+ state = check_atl_transfer(hcd, &ptd,
+ slots[slot].qtd->urb);
+ }
- done_map = isp1760_readl(usb_hcd->regs +
- HC_INT_PTD_DONEMAP_REG);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
-
- or_map = isp1760_readl(usb_hcd->regs + HC_INT_IRQ_MASK_OR_REG);
- or_map &= ~done_map;
- isp1760_writel(or_map, usb_hcd->regs + HC_INT_IRQ_MASK_OR_REG);
-
- int_regs_base = INT_REGS_OFFSET;
-
- while (done_map) {
- u32 dw1;
- u32 dw3;
+ qtd = slots[slot].qtd;
+ slots[slot].qtd = NULL;
+ qh = slots[slot].qh;
+ slots[slot].qh = NULL;
+ qh->slot = -1;
+
+ WARN_ON(qtd->status != QTD_XFER_STARTED);
+
+ switch (state) {
+ case PTD_STATE_QTD_DONE:
+ if ((usb_pipeint(qtd->urb->pipe)) &&
+ (qtd->urb->dev->speed != USB_SPEED_HIGH))
+ qtd->actual_length =
+ FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3);
+ else
+ qtd->actual_length =
+ FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3);
+
+ qtd->status = QTD_XFER_COMPLETE;
+ if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
+ is_short_bulk(qtd))
+ qtd = NULL;
+ else
+ qtd = list_entry(qtd->qtd_list.next,
+ typeof(*qtd), qtd_list);
+
+ qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
+ qh->ping = FROM_DW3_PING(ptd.dw3);
+ break;
- queue_entry = __ffs(done_map);
- done_map &= ~(1 << queue_entry);
- skip_map |= 1 << queue_entry;
+ case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for atls only */
+ qtd->status = QTD_PAYLOAD_ALLOC;
+ ptd.dw0 |= DW0_VALID_BIT;
+ /* RL counter = ERR counter */
+ ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf);
+ ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2));
+ ptd.dw3 &= ~TO_DW3_CERR(3);
+ ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER);
+ qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
+ qh->ping = FROM_DW3_PING(ptd.dw3);
+ break;
- int_regs = int_regs_base + queue_entry * sizeof(struct ptd);
- urb = priv->int_ints[queue_entry].urb;
- qtd = priv->int_ints[queue_entry].qtd;
- qh = priv->int_ints[queue_entry].qh;
- payload = priv->int_ints[queue_entry].payload;
+ case PTD_STATE_URB_RETIRE:
+ qtd->status = QTD_RETIRE;
+ if ((qtd->urb->dev->speed != USB_SPEED_HIGH) &&
+ (qtd->urb->status != -EPIPE) &&
+ (qtd->urb->status != -EREMOTEIO)) {
+ qh->tt_buffer_dirty = 1;
+ if (usb_hub_clear_tt_buffer(qtd->urb))
+ /* Clear failed; let's hope things work
+ anyway */
+ qh->tt_buffer_dirty = 0;
+ }
+ qtd = NULL;
+ qh->toggle = 0;
+ qh->ping = 0;
+ break;
- if (!qh) {
- printk(KERN_ERR "(INT) qh is 0\n");
+ default:
+ WARN_ON(1);
continue;
}
- isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs +
- HC_MEMORY_REG);
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
- /*
- * write bank1 address twice to ensure the 90ns delay (time
- * between BANK0 write and the priv_read_copy() call is at
- * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 92ns)
- */
- isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
- HC_MEMORY_REG);
-
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs +
- ISP_BANK(0), sizeof(ptd));
- dw1 = le32_to_cpu(ptd.dw1);
- dw3 = le32_to_cpu(ptd.dw3);
- check_int_err_status(le32_to_cpu(ptd.dw4));
-
- error = check_error(&ptd);
- if (error) {
-#if 0
- printk(KERN_ERR "Error in %s().\n", __func__);
- printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x "
- "dw3: %08x dw4: %08x dw5: %08x dw6: "
- "%08x dw7: %08x\n",
- ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3,
- ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
-#endif
- urb->status = -EPIPE;
- priv->int_ints[queue_entry].qh->toggle = 0;
- priv->int_ints[queue_entry].qh->ping = 0;
+ if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) {
+ if (slots == priv->int_slots) {
+ if (state == PTD_STATE_QTD_RELOAD)
+ dev_err(hcd->self.controller,
+ "%s: PTD_STATE_QTD_RELOAD on "
+ "interrupt packet\n", __func__);
+ if (state != PTD_STATE_QTD_RELOAD)
+ create_ptd_int(qh, qtd, &ptd);
+ } else {
+ if (state != PTD_STATE_QTD_RELOAD)
+ create_ptd_atl(qh, qtd, &ptd);
+ }
- } else {
- priv->int_ints[queue_entry].qh->toggle =
- dw3 & (1 << 25);
- priv->int_ints[queue_entry].qh->ping = dw3 & (1 << 26);
+ start_bus_transfer(hcd, ptd_offset, slot, slots, qtd,
+ qh, &ptd);
}
+ }
- if (urb->dev->speed != USB_SPEED_HIGH)
- length = PTD_XFERRED_LENGTH_LO(dw3);
- else
- length = PTD_XFERRED_LENGTH(dw3);
-
- if (length) {
- switch (DW1_GET_PID(dw1)) {
- case IN_PID:
- priv_read_copy(priv,
- priv->int_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload + ISP_BANK(1),
- length);
- case OUT_PID:
-
- urb->actual_length += length;
+ if (modified)
+ schedule_ptds(hcd);
+}
- case SETUP_PID:
- break;
- }
- }
+static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ u32 imask;
+ irqreturn_t irqret = IRQ_NONE;
- priv->int_ints[queue_entry].data_buffer = NULL;
- priv->int_ints[queue_entry].urb = NULL;
- priv->int_ints[queue_entry].qtd = NULL;
- priv->int_ints[queue_entry].qh = NULL;
+ spin_lock(&priv->lock);
- isp1760_writel(skip_map, usb_hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
- free_mem(priv, payload);
+ if (!(hcd->state & HC_STATE_RUNNING))
+ goto leave;
- if (urb->status == -EPIPE) {
- /* HALT received */
+ imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
+ if (unlikely(!imask))
+ goto leave;
+ reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
- qtd = clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
+ priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
- } else if (qtd->status & URB_COMPLETE_NOTIFY) {
+ handle_done_ptds(hcd);
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
+ irqret = IRQ_HANDLED;
+leave:
+ spin_unlock(&priv->lock);
- qtd = clean_this_qtd(qtd);
- isp1760_urb_done(priv, urb, urb->status);
+ return irqret;
+}
- } else {
- /* next QTD of this URB */
+/*
+ * Workaround for problem described in chip errata 2:
+ *
+ * Sometimes interrupts are not generated when ATL (not INT?) completion occurs.
+ * One solution suggested in the errata is to use SOF interrupts _instead_of_
+ * ATL done interrupts (the "instead of" might be important since it seems
+ * enabling ATL interrupts also causes the chip to sometimes - rarely - "forget"
+ * to set the PTD's done bit in addition to not generating an interrupt!).
+ *
+ * So if we use SOF + ATL interrupts, we sometimes get stale PTDs since their
+ * done bit is not being set. This is bad - it blocks the endpoint until reboot.
+ *
+ * If we use SOF interrupts only, we get latency between ptd completion and the
+ * actual handling. This is very noticeable in testusb runs, which take several
+ * minutes longer without ATL interrupts.
+ *
+ * A better solution is to run the code below every SLOT_CHECK_PERIOD ms. If it
+ * finds active ATL slots which are older than SLOT_TIMEOUT ms, it checks the
+ * slot's ACTIVE and VALID bits. If these are not set, the ptd is considered
+ * completed and its done map bit is set.
+ *
+ * The values of SLOT_TIMEOUT and SLOT_CHECK_PERIOD have been arbitrarily chosen
+ * so as not to cause too much lag when this HW bug occurs, while still hopefully
+ * ensuring that the check does not falsely trigger.
+ */
+#define SLOT_TIMEOUT 300
+#define SLOT_CHECK_PERIOD 200
+static struct timer_list errata2_timer;
- qtd = clean_this_qtd(qtd);
- BUG_ON(!qtd);
+static void errata2_function(unsigned long data)
+{
+ struct usb_hcd *hcd = (struct usb_hcd *) data;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ int slot;
+ struct ptd ptd;
+ unsigned long spinflags;
+
+ spin_lock_irqsave(&priv->lock, spinflags);
+
+ for (slot = 0; slot < 32; slot++)
+ if (priv->atl_slots[slot].qh && time_after(jiffies,
+ priv->atl_slots[slot].timestamp +
+ SLOT_TIMEOUT * HZ / 1000)) {
+ ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
+ if (!FROM_DW0_VALID(ptd.dw0) &&
+ !FROM_DW3_ACTIVE(ptd.dw3))
+ priv->atl_done_map |= 1 << slot;
}
- if (qtd)
- enqueue_an_INT_packet(usb_hcd, qh, qtd);
+ if (priv->atl_done_map)
+ handle_done_ptds(hcd);
- skip_map = isp1760_readl(usb_hcd->regs +
- HC_INT_PTD_SKIPMAP_REG);
- }
+ spin_unlock_irqrestore(&priv->lock, spinflags);
+
+ errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+ add_timer(&errata2_timer);
}
-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
-static struct isp1760_qh *qh_make(struct isp1760_hcd *priv, struct urb *urb,
- gfp_t flags)
+static int isp1760_run(struct usb_hcd *hcd)
{
- struct isp1760_qh *qh;
- int is_input, type;
-
- qh = isp1760_qh_alloc(priv, flags);
- if (!qh)
- return qh;
+ int retval;
+ u32 temp;
+ u32 command;
+ u32 chipid;
- /*
- * init endpoint/device data for this QH
- */
- is_input = usb_pipein(urb->pipe);
- type = usb_pipetype(urb->pipe);
+ hcd->uses_new_polling = 1;
- if (type == PIPE_INTERRUPT) {
+ hcd->state = HC_STATE_RUNNING;
- if (urb->dev->speed == USB_SPEED_HIGH) {
+ /* Set PTD interrupt AND & OR maps */
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
+ reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
+ /* step 23 passed */
- qh->period = urb->interval >> 3;
- if (qh->period == 0 && urb->interval != 1) {
- /* NOTE interval 2 or 4 uframes could work.
- * But interval 1 scheduling is simpler, and
- * includes high bandwidth.
- */
- printk(KERN_ERR "intr period %d uframes, NYET!",
- urb->interval);
- qh_destroy(qh);
- return NULL;
- }
- } else {
- qh->period = urb->interval;
- }
- }
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
- /* support for tt scheduling, and access to toggles */
- qh->dev = urb->dev;
+ command = reg_read32(hcd->regs, HC_USBCMD);
+ command &= ~(CMD_LRESET|CMD_RESET);
+ command |= CMD_RUN;
+ reg_write32(hcd->regs, HC_USBCMD, command);
- if (!usb_pipecontrol(urb->pipe))
- usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input,
- 1);
- return qh;
-}
+ retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
+ if (retval)
+ return retval;
-/*
- * For control/bulk/interrupt, return QH with these TDs appended.
- * Allocates and initializes the QH if necessary.
- * Returns null if it can't allocate a QH it needs to.
- * If the QH has TDs (urbs) already, that's great.
- */
-static struct isp1760_qh *qh_append_tds(struct isp1760_hcd *priv,
- struct urb *urb, struct list_head *qtd_list, int epnum,
- void **ptr)
-{
- struct isp1760_qh *qh;
- struct isp1760_qtd *qtd;
- struct isp1760_qtd *prev_qtd;
+ /*
+ * XXX
+ * Spec says to write FLAG_CF as last config action, priv code grabs
+ * the semaphore while doing so.
+ */
+ down_write(&ehci_cf_port_reset_rwsem);
+ reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
- qh = (struct isp1760_qh *)*ptr;
- if (!qh) {
- /* can't sleep here, we have priv->lock... */
- qh = qh_make(priv, urb, GFP_ATOMIC);
- if (!qh)
- return qh;
- *ptr = qh;
- }
+ retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
+ up_write(&ehci_cf_port_reset_rwsem);
+ if (retval)
+ return retval;
- qtd = list_entry(qtd_list->next, struct isp1760_qtd,
- qtd_list);
- if (!list_empty(&qh->qtd_list))
- prev_qtd = list_entry(qh->qtd_list.prev,
- struct isp1760_qtd, qtd_list);
- else
- prev_qtd = NULL;
+ init_timer(&errata2_timer);
+ errata2_timer.function = errata2_function;
+ errata2_timer.data = (unsigned long) hcd;
+ errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+ add_timer(&errata2_timer);
- list_splice(qtd_list, qh->qtd_list.prev);
- if (prev_qtd) {
- BUG_ON(prev_qtd->hw_next);
- prev_qtd->hw_next = qtd;
- }
+ chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
+ dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
+ chipid & 0xffff, chipid >> 16);
- urb->hcpriv = qh;
- return qh;
-}
-
-static void qtd_list_free(struct isp1760_hcd *priv, struct urb *urb,
- struct list_head *qtd_list)
-{
- struct list_head *entry, *temp;
+ /* PTD Register Init Part 2, Step 28 */
- list_for_each_safe(entry, temp, qtd_list) {
- struct isp1760_qtd *qtd;
+ /* Setup registers controlling PTD checking */
+ reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
+ reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
+ reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
+ reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
+ ATL_BUF_FILL | INT_BUF_FILL);
- qtd = list_entry(entry, struct isp1760_qtd, qtd_list);
- list_del(&qtd->qtd_list);
- isp1760_qtd_free(qtd);
- }
+ /* GRR this is run-once init(), being done every time the HC starts.
+ * So long as they're part of class devices, we can't do it in init()
+ * since the class device isn't created that early.
+ */
+ return 0;
}
-static int isp1760_prepare_enqueue(struct isp1760_hcd *priv, struct urb *urb,
- struct list_head *qtd_list, gfp_t mem_flags, packet_enqueue *p)
+static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
{
- struct isp1760_qtd *qtd;
- int epnum;
- unsigned long flags;
- struct isp1760_qh *qh = NULL;
- int rc;
- int qh_busy;
-
- qtd = list_entry(qtd_list->next, struct isp1760_qtd, qtd_list);
- epnum = urb->ep->desc.bEndpointAddress;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &priv_to_hcd(priv)->flags)) {
- rc = -ESHUTDOWN;
- goto done;
- }
- rc = usb_hcd_link_urb_to_ep(priv_to_hcd(priv), urb);
- if (rc)
- goto done;
-
- qh = urb->ep->hcpriv;
- if (qh)
- qh_busy = !list_empty(&qh->qtd_list);
- else
- qh_busy = 0;
-
- qh = qh_append_tds(priv, urb, qtd_list, epnum, &urb->ep->hcpriv);
- if (!qh) {
- usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
- rc = -ENOMEM;
- goto done;
- }
+ qtd->data_buffer = databuffer;
- if (!qh_busy)
- p(priv_to_hcd(priv), qh, qtd);
+ if (len > MAX_PAYLOAD_SIZE)
+ len = MAX_PAYLOAD_SIZE;
+ qtd->length = len;
-done:
- spin_unlock_irqrestore(&priv->lock, flags);
- if (!qh)
- qtd_list_free(priv, urb, qtd_list);
- return rc;
+ return qtd->length;
}
-static struct isp1760_qtd *isp1760_qtd_alloc(struct isp1760_hcd *priv,
- gfp_t flags)
+static void qtd_list_free(struct list_head *qtd_list)
{
- struct isp1760_qtd *qtd;
-
- qtd = kmem_cache_zalloc(qtd_cachep, flags);
- if (qtd)
- INIT_LIST_HEAD(&qtd->qtd_list);
+ struct isp1760_qtd *qtd, *qtd_next;
- return qtd;
+ list_for_each_entry_safe(qtd, qtd_next, qtd_list, qtd_list) {
+ list_del(&qtd->qtd_list);
+ qtd_free(qtd);
+ }
}
/*
- * create a list of filled qtds for this URB; won't link into qh.
+ * Packetize urb->transfer_buffer into list of packets of size wMaxPacketSize.
+ * Also calculate the PID type (SETUP/IN/OUT) for each packet.
*/
-static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+static void packetize_urb(struct usb_hcd *hcd,
struct urb *urb, struct list_head *head, gfp_t flags)
{
- struct isp1760_qtd *qtd, *qtd_prev;
+ struct isp1760_qtd *qtd;
void *buf;
- int len, maxpacket;
- int is_input;
- u32 token;
+ int len, maxpacketsize;
+ u8 packet_type;
/*
* URBs map to sequences of QTDs: one logical transaction
*/
- qtd = isp1760_qtd_alloc(priv, flags);
- if (!qtd)
- return NULL;
- list_add_tail(&qtd->qtd_list, head);
- qtd->urb = urb;
- urb->status = -EINPROGRESS;
+ if (!urb->transfer_buffer && urb->transfer_buffer_length) {
+ /* XXX This looks like usb storage / SCSI bug */
+ dev_err(hcd->self.controller,
+ "buf is null, dma is %08lx len is %d\n",
+ (long unsigned)urb->transfer_dma,
+ urb->transfer_buffer_length);
+ WARN_ON(1);
+ }
- token = 0;
- /* for split transactions, SplitXState initialized to zero */
+ if (usb_pipein(urb->pipe))
+ packet_type = IN_PID;
+ else
+ packet_type = OUT_PID;
- len = urb->transfer_buffer_length;
- is_input = usb_pipein(urb->pipe);
if (usb_pipecontrol(urb->pipe)) {
- /* SETUP pid */
- qtd_fill(qtd, urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- token | SETUP_PID);
-
- /* ... and always at least one more pid */
- token ^= DATA_TOGGLE;
- qtd_prev = qtd;
- qtd = isp1760_qtd_alloc(priv, flags);
+ qtd = qtd_alloc(flags, urb, SETUP_PID);
if (!qtd)
goto cleanup;
- qtd->urb = urb;
- qtd_prev->hw_next = qtd;
+ qtd_fill(qtd, urb->setup_packet, sizeof(struct usb_ctrlrequest));
list_add_tail(&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
- if (len == 0)
- token |= IN_PID;
+ if (urb->transfer_buffer_length == 0)
+ packet_type = IN_PID;
}
- /*
- * data transfer stage: buffer setup
- */
- buf = urb->transfer_buffer;
-
- if (is_input)
- token |= IN_PID;
- else
- token |= OUT_PID;
-
- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+ maxpacketsize = max_packet(usb_maxpacket(urb->dev, urb->pipe,
+ usb_pipeout(urb->pipe)));
/*
* buffer gets wrapped in one or more qtds;
* last one may be "short" (including zero len)
* and may serve as a control status ack
*/
+ buf = urb->transfer_buffer;
+ len = urb->transfer_buffer_length;
+
for (;;) {
int this_qtd_len;
- if (!buf && len) {
- /* XXX This looks like usb storage / SCSI bug */
- printk(KERN_ERR "buf is null, dma is %08lx len is %d\n",
- (long unsigned)urb->transfer_dma, len);
- WARN_ON(1);
- }
+ qtd = qtd_alloc(flags, urb, packet_type);
+ if (!qtd)
+ goto cleanup;
+ this_qtd_len = qtd_fill(qtd, buf, len);
+ list_add_tail(&qtd->qtd_list, head);
- this_qtd_len = qtd_fill(qtd, buf, len, token);
len -= this_qtd_len;
buf += this_qtd_len;
- /* qh makes control packets use qtd toggle; maybe switch it */
- if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
- token ^= DATA_TOGGLE;
-
if (len <= 0)
break;
-
- qtd_prev = qtd;
- qtd = isp1760_qtd_alloc(priv, flags);
- if (!qtd)
- goto cleanup;
- qtd->urb = urb;
- qtd_prev->hw_next = qtd;
- list_add_tail(&qtd->qtd_list, head);
}
/*
@@ -1571,167 +1488,247 @@ static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
if (usb_pipecontrol(urb->pipe)) {
one_more = 1;
- /* "in" <--> "out" */
- token ^= IN_PID;
- /* force DATA1 */
- token |= DATA_TOGGLE;
+ if (packet_type == IN_PID)
+ packet_type = OUT_PID;
+ else
+ packet_type = IN_PID;
} else if (usb_pipebulk(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
- && !(urb->transfer_buffer_length % maxpacket)) {
+ && !(urb->transfer_buffer_length %
+ maxpacketsize)) {
one_more = 1;
}
if (one_more) {
- qtd_prev = qtd;
- qtd = isp1760_qtd_alloc(priv, flags);
+ qtd = qtd_alloc(flags, urb, packet_type);
if (!qtd)
goto cleanup;
- qtd->urb = urb;
- qtd_prev->hw_next = qtd;
- list_add_tail(&qtd->qtd_list, head);
/* never any data in such packets */
- qtd_fill(qtd, NULL, 0, token);
+ qtd_fill(qtd, NULL, 0);
+ list_add_tail(&qtd->qtd_list, head);
}
}
- qtd->status = URB_COMPLETE_NOTIFY;
- return head;
+ return;
cleanup:
- qtd_list_free(priv, urb, head);
- return NULL;
+ qtd_list_free(head);
}
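As a rough illustration of the chunking done above: a data stage is simply cut into consecutive qtds, each carrying at most MAX_PAYLOAD_SIZE bytes (maxpacketsize is only consulted later, for the ZERO_PACKET decision). The helper below is hypothetical and only mirrors qtd_fill()'s clamping, it is not part of the driver:

	/* Hypothetical helper: how many qtds packetize_urb() would create for
	 * a plain bulk/interrupt data stage (SETUP, STATUS and trailing ZLP
	 * packets are handled separately above). */
	static unsigned int qtd_count_for_length(size_t len)
	{
		unsigned int n = 0;

		do {
			/* each qtd carries at most MAX_PAYLOAD_SIZE bytes */
			len -= min_t(size_t, len, MAX_PAYLOAD_SIZE);
			n++;
		} while (len > 0);

		return n;	/* a zero-length stage still needs one qtd */
	}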
static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
- struct list_head qtd_list;
- packet_enqueue *pe;
-
- INIT_LIST_HEAD(&qtd_list);
+ struct list_head *ep_queue;
+ struct isp1760_qh *qh, *qhit;
+ unsigned long spinflags;
+ LIST_HEAD(new_qtds);
+ int retval;
+ int qh_in_queue;
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
+ ep_queue = &priv->qh_list[QH_CONTROL];
+ break;
case PIPE_BULK:
-
- if (!qh_urb_transaction(priv, urb, &qtd_list, mem_flags))
- return -ENOMEM;
- pe = enqueue_an_ATL_packet;
+ ep_queue = &priv->qh_list[QH_BULK];
break;
-
case PIPE_INTERRUPT:
- if (!qh_urb_transaction(priv, urb, &qtd_list, mem_flags))
- return -ENOMEM;
- pe = enqueue_an_INT_packet;
+ if (urb->interval < 0)
+ return -EINVAL;
+ /* FIXME: Check bandwidth */
+ ep_queue = &priv->qh_list[QH_INTERRUPT];
break;
-
case PIPE_ISOCHRONOUS:
- printk(KERN_ERR "PIPE_ISOCHRONOUS ain't supported\n");
+ dev_err(hcd->self.controller, "%s: isochronous USB packets "
+ "not yet supported\n",
+ __func__);
+ return -EPIPE;
default:
+ dev_err(hcd->self.controller, "%s: unknown pipe type\n",
+ __func__);
return -EPIPE;
}
- return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
-}
+ if (usb_pipein(urb->pipe))
+ urb->actual_length = 0;
-static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
- int status)
-{
- struct isp1760_hcd *priv = hcd_to_priv(hcd);
- struct inter_packet_info *ints;
- u32 i;
- u32 reg_base, or_reg, skip_reg;
- unsigned long flags;
- struct ptd ptd;
+ packetize_urb(hcd, urb, &new_qtds, mem_flags);
+ if (list_empty(&new_qtds))
+ return -ENOMEM;
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_ISOCHRONOUS:
- return -EPIPE;
- break;
+ retval = 0;
+ spin_lock_irqsave(&priv->lock, spinflags);
- case PIPE_INTERRUPT:
- ints = priv->int_ints;
- reg_base = INT_REGS_OFFSET;
- or_reg = HC_INT_IRQ_MASK_OR_REG;
- skip_reg = HC_INT_PTD_SKIPMAP_REG;
- break;
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ retval = -ESHUTDOWN;
+ qtd_list_free(&new_qtds);
+ goto out;
+ }
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval) {
+ qtd_list_free(&new_qtds);
+ goto out;
+ }
- default:
- ints = priv->atl_ints;
- reg_base = ATL_REGS_OFFSET;
- or_reg = HC_ATL_IRQ_MASK_OR_REG;
- skip_reg = HC_ATL_PTD_SKIPMAP_REG;
- break;
+ qh = urb->ep->hcpriv;
+ if (qh) {
+ qh_in_queue = 0;
+ list_for_each_entry(qhit, ep_queue, qh_list) {
+ if (qhit == qh) {
+ qh_in_queue = 1;
+ break;
+ }
+ }
+ if (!qh_in_queue)
+ list_add_tail(&qh->qh_list, ep_queue);
+ } else {
+ qh = qh_alloc(GFP_ATOMIC);
+ if (!qh) {
+ retval = -ENOMEM;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ qtd_list_free(&new_qtds);
+ goto out;
+ }
+ list_add_tail(&qh->qh_list, ep_queue);
+ urb->ep->hcpriv = qh;
}
- memset(&ptd, 0, sizeof(ptd));
- spin_lock_irqsave(&priv->lock, flags);
+ list_splice_tail(&new_qtds, &qh->qtd_list);
+ schedule_ptds(hcd);
- for (i = 0; i < 32; i++) {
- if (ints->urb == urb) {
- u32 skip_map;
- u32 or_map;
- struct isp1760_qtd *qtd;
+out:
+ spin_unlock_irqrestore(&priv->lock, spinflags);
+ return retval;
+}
- skip_map = isp1760_readl(hcd->regs + skip_reg);
- skip_map |= 1 << i;
- isp1760_writel(skip_map, hcd->regs + skip_reg);
+static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
+ struct isp1760_qh *qh)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ int skip_map;
+
+ WARN_ON(qh->slot == -1);
+
+ /* We need to forcefully reclaim the slot since some transfers never
+ return, e.g. interrupt transfers and NAKed bulk transfers. */
+ if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
+ skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
+ skip_map |= (1 << qh->slot);
+ reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
+ priv->atl_slots[qh->slot].qh = NULL;
+ priv->atl_slots[qh->slot].qtd = NULL;
+ } else {
+ skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
+ skip_map |= (1 << qh->slot);
+ reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
+ priv->int_slots[qh->slot].qh = NULL;
+ priv->int_slots[qh->slot].qtd = NULL;
+ }
- or_map = isp1760_readl(hcd->regs + or_reg);
- or_map &= ~(1 << i);
- isp1760_writel(or_map, hcd->regs + or_reg);
+ qh->slot = -1;
+}
- priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
- + i * sizeof(ptd), sizeof(ptd));
- qtd = ints->qtd;
+/*
+ * Retire the qtds beginning at 'qtd', all belonging to the same urb, killing
+ * any active transfer belonging to the urb in the process.
+ */
+static void dequeue_urb_from_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
+ struct isp1760_qtd *qtd)
+{
+ struct urb *urb;
+ int urb_was_running;
- clean_up_qtdlist(qtd);
+ urb = qtd->urb;
+ urb_was_running = 0;
+ list_for_each_entry_from(qtd, &qh->qtd_list, qtd_list) {
+ if (qtd->urb != urb)
+ break;
+
+ if (qtd->status >= QTD_XFER_STARTED)
+ urb_was_running = 1;
+ if (last_qtd_of_urb(qtd, qh) &&
+ (qtd->status >= QTD_XFER_COMPLETE))
+ urb_was_running = 0;
- free_mem(priv, ints->payload);
+ if (qtd->status == QTD_XFER_STARTED)
+ kill_transfer(hcd, urb, qh);
+ qtd->status = QTD_RETIRE;
+ }
- ints->urb = NULL;
- ints->qh = NULL;
- ints->qtd = NULL;
- ints->data_buffer = NULL;
- ints->payload = 0;
+ if ((urb->dev->speed != USB_SPEED_HIGH) && urb_was_running) {
+ qh->tt_buffer_dirty = 1;
+ if (usb_hub_clear_tt_buffer(urb))
+ /* Clear failed; let's hope things work anyway */
+ qh->tt_buffer_dirty = 0;
+ }
+}
- isp1760_urb_done(priv, urb, status);
+static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ unsigned long spinflags;
+ struct isp1760_qh *qh;
+ struct isp1760_qtd *qtd;
+ int retval = 0;
+
+ spin_lock_irqsave(&priv->lock, spinflags);
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval)
+ goto out;
+
+ qh = urb->ep->hcpriv;
+ if (!qh) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
+ if (qtd->urb == urb) {
+ dequeue_urb_from_qtd(hcd, qh, qtd);
+ list_move(&qtd->qtd_list, &qh->qtd_list);
break;
}
- ints++;
- }
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
+ urb->status = status;
+ schedule_ptds(hcd);
+
+out:
+ spin_unlock_irqrestore(&priv->lock, spinflags);
+ return retval;
}
-static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd)
+static void isp1760_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
- u32 imask;
- irqreturn_t irqret = IRQ_NONE;
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ unsigned long spinflags;
+ struct isp1760_qh *qh, *qh_iter;
+ int i;
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, spinflags);
- if (!(usb_hcd->state & HC_STATE_RUNNING))
- goto leave;
+ qh = ep->hcpriv;
+ if (!qh)
+ goto out;
- imask = isp1760_readl(usb_hcd->regs + HC_INTERRUPT_REG);
- if (unlikely(!imask))
- goto leave;
+ WARN_ON(!list_empty(&qh->qtd_list));
- isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG);
- if (imask & HC_ATL_INT)
- do_atl_int(usb_hcd);
+ for (i = 0; i < QH_END; i++)
+ list_for_each_entry(qh_iter, &priv->qh_list[i], qh_list)
+ if (qh_iter == qh) {
+ list_del(&qh_iter->qh_list);
+ i = QH_END;
+ break;
+ }
+ qh_free(qh);
+ ep->hcpriv = NULL;
- if (imask & HC_INTL_INT)
- do_intl_int(usb_hcd);
+ schedule_ptds(hcd);
- irqret = IRQ_HANDLED;
-leave:
- spin_unlock(&priv->lock);
- return irqret;
+out:
+ spin_unlock_irqrestore(&priv->lock, spinflags);
}
static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
@@ -1742,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
int retval = 1;
unsigned long flags;
- /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+ /* if !PM_RUNTIME, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
@@ -1751,12 +1748,12 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
mask = PORT_CSC;
spin_lock_irqsave(&priv->lock, flags);
- temp = isp1760_readl(hcd->regs + HC_PORTSC1);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
if (temp & PORT_OWNER) {
if (temp & PORT_CSC) {
temp &= ~PORT_CSC;
- isp1760_writel(temp, hcd->regs + HC_PORTSC1);
+ reg_write32(hcd->regs, HC_PORTSC1, temp);
goto done;
}
}
@@ -1796,9 +1793,9 @@ static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset(&desc->bitmap[0], 0, temp);
- memset(&desc->bitmap[temp], 0xff, temp);
+ /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
/* per-port overcurrent reporting */
temp = 0x0008;
@@ -1813,8 +1810,8 @@ static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
-static int check_reset_complete(struct isp1760_hcd *priv, int index,
- u32 __iomem *status_reg, int port_status)
+static int check_reset_complete(struct usb_hcd *hcd, int index,
+ int port_status)
{
if (!(port_status & PORT_CONNECT))
return port_status;
@@ -1822,15 +1819,17 @@ static int check_reset_complete(struct isp1760_hcd *priv, int index,
/* if reset finished and it's still not enabled -- handoff */
if (!(port_status & PORT_PE)) {
- printk(KERN_ERR "port %d full speed --> companion\n",
- index + 1);
+ dev_info(hcd->self.controller,
+ "port %d full speed --> companion\n",
+ index + 1);
port_status |= PORT_OWNER;
port_status &= ~PORT_RWC_BITS;
- isp1760_writel(port_status, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, port_status);
} else
- printk(KERN_ERR "port %d high speed\n", index + 1);
+ dev_info(hcd->self.controller, "port %d high speed\n",
+ index + 1);
return port_status;
}
@@ -1840,7 +1839,6 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int ports = HCS_N_PORTS(priv->hcs_params);
- u32 __iomem *status_reg = hcd->regs + HC_PORTSC1;
u32 temp, status;
unsigned long flags;
int retval = 0;
@@ -1869,18 +1867,18 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- temp = isp1760_readl(status_reg);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
/*
* Even if OWNER is set, so the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
- * USB_PORT_FEAT_C_CONNECTION).
+ * USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- isp1760_writel(temp & ~PORT_PE, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_PE);
break;
case USB_PORT_FEAT_C_ENABLE:
/* XXX error? */
@@ -1894,8 +1892,8 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
goto error;
/* resume signaling for 20 msec */
temp &= ~(PORT_RWC_BITS);
- isp1760_writel(temp | PORT_RESUME,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp | PORT_RESUME);
priv->reset_done = jiffies +
msecs_to_jiffies(20);
}
@@ -1905,11 +1903,11 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(priv->hcs_params))
- isp1760_writel(temp & ~PORT_POWER, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp & ~PORT_POWER);
break;
case USB_PORT_FEAT_C_CONNECTION:
- isp1760_writel(temp | PORT_CSC,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_CSC);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
/* XXX error ?*/
@@ -1920,7 +1918,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
default:
goto error;
}
- isp1760_readl(hcd->regs + HC_USBCMD);
+ reg_read32(hcd->regs, HC_USBCMD);
break;
case GetHubDescriptor:
isp1760_hub_descriptor(priv, (struct usb_hub_descriptor *)
@@ -1935,16 +1933,16 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
goto error;
wIndex--;
status = 0;
- temp = isp1760_readl(status_reg);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
/* wPortChange bits */
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
- printk(KERN_ERR "Port resume should be skipped.\n");
+ dev_err(hcd->self.controller, "Port resume should be skipped.\n");
/* Remote Wakeup received? */
if (!priv->reset_done) {
@@ -1952,25 +1950,23 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = jiffies
+ msecs_to_jiffies(20);
/* check the port again */
- mod_timer(&priv_to_hcd(priv)->rh_timer,
- priv->reset_done);
+ mod_timer(&hcd->rh_timer, priv->reset_done);
}
/* resume completed? */
else if (time_after_eq(jiffies,
priv->reset_done)) {
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
priv->reset_done = 0;
/* stop resume signaling */
- temp = isp1760_readl(status_reg);
- isp1760_writel(
- temp & ~(PORT_RWC_BITS | PORT_RESUME),
- status_reg);
- retval = handshake(priv, status_reg,
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp & ~(PORT_RWC_BITS | PORT_RESUME));
+ retval = handshake(hcd, HC_PORTSC1,
PORT_RESUME, 0, 2000 /* 2msec */);
if (retval != 0) {
- isp1760_err(priv,
+ dev_err(hcd->self.controller,
"port %d resume error %d\n",
wIndex + 1, retval);
goto error;
@@ -1983,26 +1979,25 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
priv->reset_done)) {
- status |= 1 << USB_PORT_FEAT_C_RESET;
+ status |= USB_PORT_STAT_C_RESET << 16;
priv->reset_done = 0;
/* force reset to complete */
- isp1760_writel(temp & ~PORT_RESET,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_RESET);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
- retval = handshake(priv, status_reg,
+ retval = handshake(hcd, HC_PORTSC1,
PORT_RESET, 0, 750);
if (retval != 0) {
- isp1760_err(priv, "port %d reset error %d\n",
+ dev_err(hcd->self.controller, "port %d reset error %d\n",
wIndex + 1, retval);
goto error;
}
/* see what we found out */
- temp = check_reset_complete(priv, wIndex, status_reg,
- isp1760_readl(status_reg));
+ temp = check_reset_complete(hcd, wIndex,
+ reg_read32(hcd->regs, HC_PORTSC1));
}
/*
* Even if OWNER is set, there's no harm letting khubd
@@ -2011,21 +2006,21 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
*/
if (temp & PORT_OWNER)
- printk(KERN_ERR "Warning: PORT_OWNER is set\n");
+ dev_err(hcd->self.controller, "PORT_OWNER is set\n");
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
- status |= ehci_port_speed(priv, temp);
+ status |= USB_PORT_STAT_HIGH_SPEED;
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
if (temp & (PORT_SUSPEND|PORT_RESUME))
- status |= 1 << USB_PORT_FEAT_SUSPEND;
+ status |= USB_PORT_STAT_SUSPEND;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
@@ -2045,14 +2040,14 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
- temp = isp1760_readl(status_reg);
+ temp = reg_read32(hcd->regs, HC_PORTSC1);
if (temp & PORT_OWNER)
break;
/* temp &= ~PORT_RWC_BITS; */
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- isp1760_writel(temp | PORT_PE, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_PE);
break;
case USB_PORT_FEAT_SUSPEND:
@@ -2060,12 +2055,12 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
|| (temp & PORT_RESET) != 0)
goto error;
- isp1760_writel(temp | PORT_SUSPEND, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_SUSPEND);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC(priv->hcs_params))
- isp1760_writel(temp | PORT_POWER,
- status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp | PORT_POWER);
break;
case USB_PORT_FEAT_RESET:
if (temp & PORT_RESUME)
@@ -2088,12 +2083,12 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
priv->reset_done = jiffies +
msecs_to_jiffies(50);
}
- isp1760_writel(temp, status_reg);
+ reg_write32(hcd->regs, HC_PORTSC1, temp);
break;
default:
goto error;
}
- isp1760_readl(hcd->regs + HC_USBCMD);
+ reg_read32(hcd->regs, HC_USBCMD);
break;
default:
@@ -2105,57 +2100,12 @@ error:
return retval;
}
-static void isp1760_endpoint_disable(struct usb_hcd *usb_hcd,
- struct usb_host_endpoint *ep)
-{
- struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
- struct isp1760_qh *qh;
- struct isp1760_qtd *qtd;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- qh = ep->hcpriv;
- if (!qh)
- goto out;
-
- ep->hcpriv = NULL;
- do {
- /* more than entry might get removed */
- if (list_empty(&qh->qtd_list))
- break;
-
- qtd = list_first_entry(&qh->qtd_list, struct isp1760_qtd,
- qtd_list);
-
- if (qtd->status & URB_ENQUEUED) {
-
- spin_unlock_irqrestore(&priv->lock, flags);
- isp1760_urb_dequeue(usb_hcd, qtd->urb, -ECONNRESET);
- spin_lock_irqsave(&priv->lock, flags);
- } else {
- struct urb *urb;
-
- urb = qtd->urb;
- clean_up_qtdlist(qtd);
- isp1760_urb_done(priv, urb, -ECONNRESET);
- }
- } while (1);
-
- qh_destroy(qh);
- /* remove requests and leak them.
- * ATL are pretty fast done, INT could take a while...
- * The latter shoule be removed
- */
-out:
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
static int isp1760_get_frame(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 fr;
- fr = isp1760_readl(hcd->regs + HC_FRINDEX);
+ fr = reg_read32(hcd->regs, HC_FRINDEX);
return (fr >> 3) % priv->periodic_size;
}
@@ -2164,18 +2114,20 @@ static void isp1760_stop(struct usb_hcd *hcd)
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 temp;
+ del_timer(&errata2_timer);
+
isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1,
NULL, 0);
mdelay(20);
spin_lock_irq(&priv->lock);
- ehci_reset(priv);
+ ehci_reset(hcd);
/* Disable IRQ */
- temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(temp &= ~HW_GLOBAL_INTR_EN, hcd->regs + HC_HW_MODE_CTRL);
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
spin_unlock_irq(&priv->lock);
- isp1760_writel(0, hcd->regs + HC_CONFIGFLAG);
+ reg_write32(hcd->regs, HC_CONFIGFLAG, 0);
}
static void isp1760_shutdown(struct usb_hcd *hcd)
@@ -2183,14 +2135,31 @@ static void isp1760_shutdown(struct usb_hcd *hcd)
u32 command, temp;
isp1760_stop(hcd);
- temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
- isp1760_writel(temp &= ~HW_GLOBAL_INTR_EN, hcd->regs + HC_HW_MODE_CTRL);
+ temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
+ reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
- command = isp1760_readl(hcd->regs + HC_USBCMD);
+ command = reg_read32(hcd->regs, HC_USBCMD);
command &= ~CMD_RUN;
- isp1760_writel(command, hcd->regs + HC_USBCMD);
+ reg_write32(hcd->regs, HC_USBCMD, command);
}
+static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct isp1760_hcd *priv = hcd_to_priv(hcd);
+ struct isp1760_qh *qh = ep->hcpriv;
+ unsigned long spinflags;
+
+ if (!qh)
+ return;
+
+ spin_lock_irqsave(&priv->lock, spinflags);
+ qh->tt_buffer_dirty = 0;
+ schedule_ptds(hcd);
+ spin_unlock_irqrestore(&priv->lock, spinflags);
+}
+
+
static const struct hc_driver isp1760_hc_driver = {
.description = "isp1760-hcd",
.product_desc = "NXP ISP1760 USB Host Controller",
@@ -2207,10 +2176,18 @@ static const struct hc_driver isp1760_hc_driver = {
.get_frame_number = isp1760_get_frame,
.hub_status_data = isp1760_hub_status_data,
.hub_control = isp1760_hub_control,
+ .clear_tt_buffer_complete = isp1760_clear_tt_buffer_complete,
};
int __init init_kmem_once(void)
{
+ urb_listitem_cachep = kmem_cache_create("isp1760_urb_listitem",
+ sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
+ SLAB_MEM_SPREAD, NULL);
+
+ if (!urb_listitem_cachep)
+ return -ENOMEM;
+
qtd_cachep = kmem_cache_create("isp1760_qtd",
sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
SLAB_MEM_SPREAD, NULL);
@@ -2233,11 +2210,14 @@ void deinit_kmem_cache(void)
{
kmem_cache_destroy(qtd_cachep);
kmem_cache_destroy(qh_cachep);
+ kmem_cache_destroy(urb_listitem_cachep);
}
-struct usb_hcd *isp1760_register(u64 res_start, u64 res_len, int irq,
- u64 irqflags, struct device *dev, const char *busname,
- unsigned int devflags)
+struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
+ int irq, unsigned long irqflags,
+ int rst_gpio,
+ struct device *dev, const char *busname,
+ unsigned int devflags)
{
struct usb_hcd *hcd;
struct isp1760_hcd *priv;
@@ -2255,6 +2235,7 @@ struct usb_hcd *isp1760_register(u64 res_start, u64 res_len, int irq,
priv = hcd_to_priv(hcd);
priv->devflags = devflags;
+ priv->rst_gpio = rst_gpio;
init_memory(priv);
hcd->regs = ioremap(res_start, res_len);
if (!hcd->regs) {
@@ -2269,6 +2250,7 @@ struct usb_hcd *isp1760_register(u64 res_start, u64 res_len, int irq,
ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
goto err_unmap;
+ device_wakeup_enable(hcd->self.controller);
return hcd;
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index a9daea58796..33dc79ccaa6 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -2,9 +2,11 @@
#define _ISP1760_HCD_H_
/* exports for if */
-struct usb_hcd *isp1760_register(u64 res_start, u64 res_len, int irq,
- u64 irqflags, struct device *dev, const char *busname,
- unsigned int devflags);
+struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
+ int irq, unsigned long irqflags,
+ int rst_gpio,
+ struct device *dev, const char *busname,
+ unsigned int devflags);
int init_kmem_once(void);
void deinit_kmem_cache(void);
@@ -48,10 +50,9 @@ void deinit_kmem_cache(void);
#define SW_RESET_RESET_ALL (1 << 0)
#define HC_BUFFER_STATUS_REG 0x334
-#define ATL_BUFFER 0x1
-#define INT_BUFFER 0x2
-#define ISO_BUFFER 0x4
-#define BUFFER_MAP 0x7
+#define ISO_BUF_FILL (1 << 2)
+#define INT_BUF_FILL (1 << 1)
+#define ATL_BUF_FILL (1 << 0)
#define HC_MEMORY_REG 0x33c
#define ISP_BANK(x) ((x) << 16)
@@ -67,13 +68,12 @@ void deinit_kmem_cache(void);
#define HC_INTERRUPT_REG 0x310
#define HC_INTERRUPT_ENABLE 0x314
-#define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT)
-
#define HC_ISO_INT (1 << 9)
#define HC_ATL_INT (1 << 8)
#define HC_INTL_INT (1 << 7)
#define HC_EOT_INT (1 << 3)
#define HC_SOT_INT (1 << 1)
+#define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT)
#define HC_ISO_IRQ_MASK_OR_REG 0x318
#define HC_INT_IRQ_MASK_OR_REG 0x31C
@@ -82,54 +82,38 @@ void deinit_kmem_cache(void);
#define HC_INT_IRQ_MASK_AND_REG 0x328
#define HC_ATL_IRQ_MASK_AND_REG 0x32C
-/* Register sets */
-#define HC_BEGIN_OF_ATL 0x0c00
-#define HC_BEGIN_OF_INT 0x0800
-#define HC_BEGIN_OF_ISO 0x0400
-#define HC_BEGIN_OF_PAYLOAD 0x1000
-
/* urb state*/
#define DELETE_URB (0x0008)
#define NO_TRANSFER_ACTIVE (0xffffffff)
-#define ATL_REGS_OFFSET (0xc00)
-#define INT_REGS_OFFSET (0x800)
-
-/* Philips Transfer Descriptor (PTD) */
+/* Philips Proprietary Transfer Descriptor (PTD) */
+typedef __u32 __bitwise __dw;
struct ptd {
- __le32 dw0;
- __le32 dw1;
- __le32 dw2;
- __le32 dw3;
- __le32 dw4;
- __le32 dw5;
- __le32 dw6;
- __le32 dw7;
+ __dw dw0;
+ __dw dw1;
+ __dw dw2;
+ __dw dw3;
+ __dw dw4;
+ __dw dw5;
+ __dw dw6;
+ __dw dw7;
};
+#define PTD_OFFSET 0x0400
+#define ISO_PTD_OFFSET 0x0400
+#define INT_PTD_OFFSET 0x0800
+#define ATL_PTD_OFFSET 0x0c00
+#define PAYLOAD_OFFSET 0x1000
-struct inter_packet_info {
- void *data_buffer;
- u32 payload;
-#define PTD_FIRE_NEXT (1 << 0)
-#define PTD_URB_FINISHED (1 << 1)
- struct urb *urb;
+struct slotinfo {
struct isp1760_qh *qh;
struct isp1760_qtd *qtd;
+ unsigned long timestamp;
};
typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct isp1760_qtd *qtd);
-#define isp1760_dbg(priv, fmt, args...) \
- dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args)
-
-#define isp1760_info(priv, fmt, args...) \
- dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
-
-#define isp1760_err(priv, fmt, args...) \
- dev_err(priv_to_hcd(priv)->self.controller, fmt, ##args)
-
/*
* Device flags that can vary from board to board. All of these
* indicate the most "atypical" case, so that a devflags of 0 is
@@ -141,6 +125,9 @@ typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
#define ISP1760_FLAG_DACK_POL_HIGH 0x00000010 /* DACK active high */
#define ISP1760_FLAG_DREQ_POL_HIGH 0x00000020 /* DREQ active high */
#define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */
+#define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */
+#define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */
+#define ISP1760_FLAG_RESET_ACTIVE_HIGH 0x80000000 /* RESET GPIO active high */
/* chip memory management */
struct memory_chunk {
@@ -164,63 +151,58 @@ struct memory_chunk {
#define BLOCK_2_SIZE 1024
#define BLOCK_3_SIZE 8192
#define BLOCKS (BLOCK_1_NUM + BLOCK_2_NUM + BLOCK_3_NUM)
-#define PAYLOAD_SIZE 0xf000
-
-/* I saw if some reloads if the pointer was negative */
-#define ISP1760_NULL_POINTER (0x400)
+#define MAX_PAYLOAD_SIZE BLOCK_3_SIZE
+#define PAYLOAD_AREA_SIZE 0xf000
/* ATL */
/* DW0 */
-#define PTD_VALID 1
-#define PTD_LENGTH(x) (((u32) x) << 3)
-#define PTD_MAXPACKET(x) (((u32) x) << 18)
-#define PTD_MULTI(x) (((u32) x) << 29)
-#define PTD_ENDPOINT(x) (((u32) x) << 31)
+#define DW0_VALID_BIT 1
+#define FROM_DW0_VALID(x) ((x) & 0x01)
+#define TO_DW0_LENGTH(x) (((u32) x) << 3)
+#define TO_DW0_MAXPACKET(x) (((u32) x) << 18)
+#define TO_DW0_MULTI(x) (((u32) x) << 29)
+#define TO_DW0_ENDPOINT(x) (((u32) x) << 31)
/* DW1 */
-#define PTD_DEVICE_ADDR(x) (((u32) x) << 3)
-#define PTD_PID_TOKEN(x) (((u32) x) << 10)
-#define PTD_TRANS_BULK ((u32) 2 << 12)
-#define PTD_TRANS_INT ((u32) 3 << 12)
-#define PTD_TRANS_SPLIT ((u32) 1 << 14)
-#define PTD_SE_USB_LOSPEED ((u32) 2 << 16)
-#define PTD_PORT_NUM(x) (((u32) x) << 18)
-#define PTD_HUB_NUM(x) (((u32) x) << 25)
-#define PTD_PING(x) (((u32) x) << 26)
+#define TO_DW1_DEVICE_ADDR(x) (((u32) x) << 3)
+#define TO_DW1_PID_TOKEN(x) (((u32) x) << 10)
+#define DW1_TRANS_BULK ((u32) 2 << 12)
+#define DW1_TRANS_INT ((u32) 3 << 12)
+#define DW1_TRANS_SPLIT ((u32) 1 << 14)
+#define DW1_SE_USB_LOSPEED ((u32) 2 << 16)
+#define TO_DW1_PORT_NUM(x) (((u32) x) << 18)
+#define TO_DW1_HUB_NUM(x) (((u32) x) << 25)
/* DW2 */
-#define PTD_RL_CNT(x) (((u32) x) << 25)
-#define PTD_DATA_START_ADDR(x) (((u32) x) << 8)
-#define BASE_ADDR 0x1000
+#define TO_DW2_DATA_START_ADDR(x) (((u32) x) << 8)
+#define TO_DW2_RL(x) ((x) << 25)
+#define FROM_DW2_RL(x) (((x) >> 25) & 0xf)
/* DW3 */
-#define PTD_CERR(x) (((u32) x) << 23)
-#define PTD_NAC_CNT(x) (((u32) x) << 19)
-#define PTD_ACTIVE ((u32) 1 << 31)
-#define PTD_DATA_TOGGLE(x) (((u32) x) << 25)
-
-#define DW3_HALT_BIT (1 << 30)
+#define FROM_DW3_NRBYTESTRANSFERRED(x) ((x) & 0x7fff)
+#define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) ((x) & 0x07ff)
+#define TO_DW3_NAKCOUNT(x) ((x) << 19)
+#define FROM_DW3_NAKCOUNT(x) (((x) >> 19) & 0xf)
+#define TO_DW3_CERR(x) ((x) << 23)
+#define FROM_DW3_CERR(x) (((x) >> 23) & 0x3)
+#define TO_DW3_DATA_TOGGLE(x) ((x) << 25)
+#define FROM_DW3_DATA_TOGGLE(x) (((x) >> 25) & 0x1)
+#define TO_DW3_PING(x) ((x) << 26)
+#define FROM_DW3_PING(x) (((x) >> 26) & 0x1)
#define DW3_ERROR_BIT (1 << 28)
-#define DW3_QTD_ACTIVE (1 << 31)
+#define DW3_BABBLE_BIT (1 << 29)
+#define DW3_HALT_BIT (1 << 30)
+#define DW3_ACTIVE_BIT (1 << 31)
+#define FROM_DW3_ACTIVE(x) (((x) >> 31) & 0x01)
#define INT_UNDERRUN (1 << 2)
#define INT_BABBLE (1 << 1)
#define INT_EXACT (1 << 0)
-#define DW1_GET_PID(x) (((x) >> 10) & 0x3)
-#define PTD_XFERRED_LENGTH(x) ((x) & 0x7fff)
-#define PTD_XFERRED_LENGTH_LO(x) ((x) & 0x7ff)
-
#define SETUP_PID (2)
#define IN_PID (1)
#define OUT_PID (0)
-#define GET_QTD_TOKEN_TYPE(x) ((x) & 0x3)
-
-#define DATA_TOGGLE (1 << 31)
-#define GET_DATA_TOGGLE(x) ((x) >> 31)
/* Errata 1 */
#define RL_COUNTER (0)
#define NAK_COUNTER (0)
#define ERR_COUNTER (2)
-#define HC_ATL_PL_SIZE (8192)
-
-#endif
+#endif /* _ISP1760_HCD_H_ */
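As a loose sketch (not lifted from the driver itself) of how the TO_DW* accessors above combine, the first two doublewords of an ATL PTD for a bulk IN transfer could be assembled along these lines; note that the endpoint number appears to be split between dw0 bit 31 and the low bits of dw1:

	/* Illustrative values only: endpoint 1 IN on device address 3,
	 * 512-byte max packet, 64 bytes to transfer. */
	u32 dw0, dw1;
	u8 ep = 1;				/* endpoint number */

	dw0  = DW0_VALID_BIT;
	dw0 |= TO_DW0_LENGTH(64);
	dw0 |= TO_DW0_MAXPACKET(512);
	dw0 |= TO_DW0_ENDPOINT(ep);		/* only the low bit fits in dw0 */

	dw1  = ep >> 1;				/* remaining endpoint bits */
	dw1 |= TO_DW1_DEVICE_ADDR(3);
	dw1 |= TO_DW1_PID_TOKEN(IN_PID);
	dw1 |= DW1_TRANS_BULK;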
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 4cf7ca428b3..df931e9ba5b 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -3,6 +3,7 @@
* Currently there is support for
* - OpenFirmware
* - PCI
+ * - PDEV (generic platform device centralized driver model)
*
* (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
*
@@ -10,59 +11,75 @@
#include <linux/usb.h>
#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/isp1760.h>
+#include <linux/usb/hcd.h>
-#include "../core/hcd.h"
#include "isp1760-hcd.h"
-#ifdef CONFIG_PPC_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
#endif
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
-#ifdef CONFIG_PPC_OF
-static int of_isp1760_probe(struct of_device *dev,
- const struct of_device_id *match)
-{
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+struct isp1760 {
struct usb_hcd *hcd;
- struct device_node *dp = dev->node;
+ int rst_gpio;
+};
+
+static int of_isp1760_probe(struct platform_device *dev)
+{
+ struct isp1760 *drvdata;
+ struct device_node *dp = dev->dev.of_node;
struct resource *res;
struct resource memory;
- struct of_irq oirq;
int virq;
- u64 res_len;
+ resource_size_t res_len;
int ret;
- const unsigned int *prop;
unsigned int devflags = 0;
+ enum of_gpio_flags gpio_flags;
+ u32 bus_width = 0;
+
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
ret = of_address_to_resource(dp, 0, &memory);
- if (ret)
- return -ENXIO;
+ if (ret) {
+ ret = -ENXIO;
+ goto free_data;
+ }
- res = request_mem_region(memory.start, memory.end - memory.start + 1,
- dev_name(&dev->dev));
- if (!res)
- return -EBUSY;
+ res_len = resource_size(&memory);
- res_len = memory.end - memory.start + 1;
+ res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
+ if (!res) {
+ ret = -EBUSY;
+ goto free_data;
+ }
- if (of_irq_map_one(dp, 0, &oirq)) {
+ virq = irq_of_parse_and_map(dp, 0);
+ if (!virq) {
ret = -ENODEV;
goto release_reg;
}
- virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
- oirq.size);
-
if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
devflags |= ISP1760_FLAG_ISP1761;
/* Some systems wire up only 16 of the 32 data lines */
- prop = of_get_property(dp, "bus-width", NULL);
- if (prop && *prop == 16)
+ of_property_read_u32(dp, "bus-width", &bus_width);
+ if (bus_width == 16)
devflags |= ISP1760_FLAG_BUS_WIDTH_16;
if (of_get_property(dp, "port1-otg", NULL) != NULL)
@@ -77,36 +94,60 @@ static int of_isp1760_probe(struct of_device *dev,
if (of_get_property(dp, "dreq-polarity", NULL) != NULL)
devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
- hcd = isp1760_register(memory.start, res_len, virq,
- IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
- devflags);
- if (IS_ERR(hcd)) {
- ret = PTR_ERR(hcd);
- goto release_reg;
+ drvdata->rst_gpio = of_get_gpio_flags(dp, 0, &gpio_flags);
+ if (gpio_is_valid(drvdata->rst_gpio)) {
+ ret = gpio_request(drvdata->rst_gpio, dev_name(&dev->dev));
+ if (!ret) {
+ if (!(gpio_flags & OF_GPIO_ACTIVE_LOW)) {
+ devflags |= ISP1760_FLAG_RESET_ACTIVE_HIGH;
+ gpio_direction_output(drvdata->rst_gpio, 0);
+ } else {
+ gpio_direction_output(drvdata->rst_gpio, 1);
+ }
+ } else {
+ drvdata->rst_gpio = ret;
+ }
}
- dev_set_drvdata(&dev->dev, hcd);
+ drvdata->hcd = isp1760_register(memory.start, res_len, virq,
+ IRQF_SHARED, drvdata->rst_gpio,
+ &dev->dev, dev_name(&dev->dev),
+ devflags);
+ if (IS_ERR(drvdata->hcd)) {
+ ret = PTR_ERR(drvdata->hcd);
+ goto free_gpio;
+ }
+
+ platform_set_drvdata(dev, drvdata);
return ret;
+free_gpio:
+ if (gpio_is_valid(drvdata->rst_gpio))
+ gpio_free(drvdata->rst_gpio);
release_reg:
- release_mem_region(memory.start, memory.end - memory.start + 1);
+ release_mem_region(memory.start, res_len);
+free_data:
+ kfree(drvdata);
return ret;
}
-static int of_isp1760_remove(struct of_device *dev)
+static int of_isp1760_remove(struct platform_device *dev)
{
- struct usb_hcd *hcd = dev_get_drvdata(&dev->dev);
+ struct isp1760 *drvdata = platform_get_drvdata(dev);
- dev_set_drvdata(&dev->dev, NULL);
+ usb_remove_hcd(drvdata->hcd);
+ iounmap(drvdata->hcd->regs);
+ release_mem_region(drvdata->hcd->rsrc_start, drvdata->hcd->rsrc_len);
+ usb_put_hcd(drvdata->hcd);
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
+ if (gpio_is_valid(drvdata->rst_gpio))
+ gpio_free(drvdata->rst_gpio);
+
+ kfree(drvdata);
return 0;
}
-static struct of_device_id of_isp1760_match[] = {
+static const struct of_device_id of_isp1760_match[] = {
{
.compatible = "nxp,usb-isp1760",
},
@@ -117,16 +158,19 @@ static struct of_device_id of_isp1760_match[] = {
};
MODULE_DEVICE_TABLE(of, of_isp1760_match);
-static struct of_platform_driver isp1760_of_driver = {
- .name = "nxp-isp1760",
- .match_table = of_isp1760_match,
+static struct platform_driver isp1760_of_driver = {
+ .driver = {
+ .name = "nxp-isp1760",
+ .owner = THIS_MODULE,
+ .of_match_table = of_isp1760_match,
+ },
.probe = of_isp1760_probe,
.remove = of_isp1760_remove,
};
#endif
#ifdef CONFIG_PCI
-static int __devinit isp1761_pci_probe(struct pci_dev *dev,
+static int isp1761_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u8 latency, limit;
@@ -236,7 +280,7 @@ static int __devinit isp1761_pci_probe(struct pci_dev *dev,
dev->dev.dma_mask = NULL;
hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
- IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
+ IRQF_SHARED, -ENOENT, &dev->dev, dev_name(&dev->dev),
devflags);
if (IS_ERR(hcd)) {
ret_status = -ENODEV;
@@ -300,41 +344,131 @@ static struct pci_driver isp1761_pci_driver = {
};
#endif
+static int isp1760_plat_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct usb_hcd *hcd;
+ struct resource *mem_res;
+ struct resource *irq_res;
+ resource_size_t mem_size;
+ struct isp1760_platform_data *priv = dev_get_platdata(&pdev->dev);
+ unsigned int devflags = 0;
+ unsigned long irqflags = IRQF_SHARED;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ pr_warning("isp1760: Memory resource not available\n");
+ ret = -ENODEV;
+ goto out;
+ }
+ mem_size = resource_size(mem_res);
+ if (!request_mem_region(mem_res->start, mem_size, "isp1760")) {
+ pr_warning("isp1760: Cannot reserve the memory resource\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ pr_warning("isp1760: IRQ resource not available\n");
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
+ irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;
+
+ if (priv) {
+ if (priv->is_isp1761)
+ devflags |= ISP1760_FLAG_ISP1761;
+ if (priv->bus_width_16)
+ devflags |= ISP1760_FLAG_BUS_WIDTH_16;
+ if (priv->port1_otg)
+ devflags |= ISP1760_FLAG_OTG_EN;
+ if (priv->analog_oc)
+ devflags |= ISP1760_FLAG_ANALOG_OC;
+ if (priv->dack_polarity_high)
+ devflags |= ISP1760_FLAG_DACK_POL_HIGH;
+ if (priv->dreq_polarity_high)
+ devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
+ }
+
+ hcd = isp1760_register(mem_res->start, mem_size, irq_res->start,
+ irqflags, -ENOENT,
+ &pdev->dev, dev_name(&pdev->dev), devflags);
+
+ platform_set_drvdata(pdev, hcd);
+
+ if (IS_ERR(hcd)) {
+ pr_warning("isp1760: Failed to register the HCD device\n");
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
+ pr_info("ISP1760 USB device initialised\n");
+ return ret;
+
+cleanup:
+ release_mem_region(mem_res->start, mem_size);
+out:
+ return ret;
+}
+
+static int isp1760_plat_remove(struct platform_device *pdev)
+{
+ struct resource *mem_res;
+ resource_size_t mem_size;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_remove_hcd(hcd);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem_size = resource_size(mem_res);
+ release_mem_region(mem_res->start, mem_size);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct platform_driver isp1760_plat_driver = {
+ .probe = isp1760_plat_probe,
+ .remove = isp1760_plat_remove,
+ .driver = {
+ .name = "isp1760",
+ },
+};
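A board file hooking into the generic platform path above might register the controller roughly as follows. Only the driver name, the resource ordering and the isp1760_platform_data field names come from this patch; the addresses, IRQ number, trigger flag and variable names are placeholders:

	/* Hypothetical board-file sketch (would include <linux/usb/isp1760.h>). */
	static struct isp1760_platform_data isp1760_pdata = {
		.is_isp1761		= 0,
		.bus_width_16		= 1,	/* only 16 data lines wired */
		.port1_otg		= 0,
		.analog_oc		= 0,
		.dack_polarity_high	= 0,
		.dreq_polarity_high	= 0,
	};

	static struct resource isp1760_resources[] = {
		{
			.start	= 0x40000000,	/* placeholder chip-select base */
			.end	= 0x4000ffff,
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 42,		/* placeholder IRQ number */
			.end	= 42,
			.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
		},
	};

	static struct platform_device isp1760_device = {
		.name			= "isp1760",	/* matches the driver above */
		.id			= -1,
		.dev.platform_data	= &isp1760_pdata,
		.resource		= isp1760_resources,
		.num_resources		= ARRAY_SIZE(isp1760_resources),
	};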
+
static int __init isp1760_init(void)
{
- int ret;
+ int ret, any_ret = -ENODEV;
init_kmem_once();
-#ifdef CONFIG_PPC_OF
- ret = of_register_platform_driver(&isp1760_of_driver);
- if (ret) {
- deinit_kmem_cache();
- return ret;
- }
+ ret = platform_driver_register(&isp1760_plat_driver);
+ if (!ret)
+ any_ret = 0;
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+ ret = platform_driver_register(&isp1760_of_driver);
+ if (!ret)
+ any_ret = 0;
#endif
#ifdef CONFIG_PCI
ret = pci_register_driver(&isp1761_pci_driver);
- if (ret)
- goto unreg_of;
+ if (!ret)
+ any_ret = 0;
#endif
- return ret;
-#ifdef CONFIG_PCI
-unreg_of:
-#endif
-#ifdef CONFIG_PPC_OF
- of_unregister_platform_driver(&isp1760_of_driver);
-#endif
- deinit_kmem_cache();
- return ret;
+ if (any_ret)
+ deinit_kmem_cache();
+ return any_ret;
}
module_init(isp1760_init);
static void __exit isp1760_exit(void)
{
-#ifdef CONFIG_PPC_OF
- of_unregister_platform_driver(&isp1760_of_driver);
+ platform_driver_unregister(&isp1760_plat_driver);
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+ platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
pci_unregister_driver(&isp1761_pci_driver);
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
new file mode 100644
index 00000000000..858efcfda50
--- /dev/null
+++ b/drivers/usb/host/max3421-hcd.c
@@ -0,0 +1,1957 @@
+/*
+ * MAX3421 Host Controller driver for USB.
+ *
+ * Author: David Mosberger-Tang <davidm@egauge.net>
+ *
+ * (C) Copyright 2014 David Mosberger-Tang <davidm@egauge.net>
+ *
+ * MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host
+ * controller on a SPI bus.
+ *
+ * Based on:
+ * o MAX3421E datasheet
+ * http://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf
+ * o MAX3421E Programming Guide
+ * http://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf
+ * o gadget/dummy_hcd.c
+ * For USB HCD implementation.
+ * o Arduino MAX3421 driver
+ * https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp
+ *
+ * This file is licenced under the GPL v2.
+ *
+ * Important note on worst-case (full-speed) packet size constraints
+ * (See USB 2.0 Section 5.6.3 and following):
+ *
+ * - control: 64 bytes
+ * - isochronous: 1023 bytes
+ * - interrupt: 64 bytes
+ * - bulk: 64 bytes
+ *
+ * Since the MAX3421 FIFO size is 64 bytes, we do not have to worry about
+ * multi-FIFO writes/reads for a single USB packet *except* for isochronous
+ * transfers. We don't support isochronous transfers at this time, so we
+ * just assume that a USB packet always fits into a single FIFO buffer.
+ *
+ * NOTE: The June 2006 version of "MAX3421E Programming Guide"
+ * (AN3785) has conflicting info for the RCVDAVIRQ bit:
+ *
+ * The description of RCVDAVIRQ says "The CPU *must* clear
+ * this IRQ bit (by writing a 1 to it) before reading the
+ * RCVFIFO data."
+ *
+ * However, the earlier section on "Programming BULK-IN
+ * Transfers" says that:
+ *
+ * After the CPU retrieves the data, it clears the
+ * RCVDAVIRQ bit.
+ *
+ * The December 2006 version has been corrected and it consistently
+ * states the second behavior is the correct one.
+ *
+ * Synchronous SPI transactions sleep so we can't perform any such
+ * transactions while holding a spin-lock (and/or while interrupts are
+ * masked). To achieve this, all SPI transactions are issued from a
+ * single thread (max3421_spi_thread).
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include <linux/platform_data/max3421-hcd.h>
+
+#define DRIVER_DESC "MAX3421 USB Host-Controller Driver"
+#define DRIVER_VERSION "1.0"
+
+/* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */
+#define USB_MAX_FRAME_NUMBER 0x7ff
+#define USB_MAX_RETRIES 3 /* # of retries before error is reported */
+
+/*
+ * Max. # of times we're willing to retransmit a request immediately in
+ * response to a NAK. Afterwards, we fall back on trying once a frame.
+ */
+#define NAK_MAX_FAST_RETRANSMITS 2
+
+#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
+
+/* Port-change mask: */
+#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \
+ USB_PORT_STAT_C_ENABLE | \
+ USB_PORT_STAT_C_SUSPEND | \
+ USB_PORT_STAT_C_OVERCURRENT | \
+ USB_PORT_STAT_C_RESET) << 16)
+
+enum max3421_rh_state {
+ MAX3421_RH_RESET,
+ MAX3421_RH_SUSPENDED,
+ MAX3421_RH_RUNNING
+};
+
+enum pkt_state {
+ PKT_STATE_SETUP, /* waiting to send setup packet to ctrl pipe */
+ PKT_STATE_TRANSFER, /* waiting to xfer transfer_buffer */
+ PKT_STATE_TERMINATE /* waiting to terminate control transfer */
+};
+
+enum scheduling_pass {
+ SCHED_PASS_PERIODIC,
+ SCHED_PASS_NON_PERIODIC,
+ SCHED_PASS_DONE
+};
+
+struct max3421_dma_buf {
+ u8 data[2];
+};
+
+struct max3421_hcd {
+ spinlock_t lock;
+
+ struct task_struct *spi_thread;
+
+ struct max3421_hcd *next;
+
+ enum max3421_rh_state rh_state;
+ /* lower 16 bits contain port status, upper 16 bits the change mask: */
+ u32 port_status;
+
+ unsigned active:1;
+
+ struct list_head ep_list; /* list of EP's with work */
+
+ /*
+ * The following are owned by spi_thread (may be accessed by
+ * SPI-thread without acquiring the HCD lock:
+	 * SPI-thread without acquiring the HCD lock):
+ u8 rev; /* chip revision */
+ u16 frame_number;
+ /*
+ * kmalloc'd buffers guaranteed to be in separate (DMA)
+ * cache-lines:
+ */
+ struct max3421_dma_buf *tx;
+ struct max3421_dma_buf *rx;
+ /*
+ * URB we're currently processing. Must not be reset to NULL
+ * unless MAX3421E chip is idle:
+ */
+ struct urb *curr_urb;
+ enum scheduling_pass sched_pass;
+ struct usb_device *loaded_dev; /* dev that's loaded into the chip */
+ int loaded_epnum; /* epnum whose toggles are loaded */
+ int urb_done; /* > 0 -> no errors, < 0: errno */
+ size_t curr_len;
+ u8 hien;
+ u8 mode;
+ u8 iopins[2];
+ unsigned int do_enable_irq:1;
+ unsigned int do_reset_hcd:1;
+ unsigned int do_reset_port:1;
+ unsigned int do_check_unlink:1;
+ unsigned int do_iopin_update:1;
+#ifdef DEBUG
+ unsigned long err_stat[16];
+#endif
+};
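PORT_C_MASK above and the port_status word in this structure work together: the low 16 bits hold the current port status and the upper 16 bits the accumulated change flags. A connect event, for instance, might be folded in roughly like this (sketch only, using the names defined above):

	/* Illustrative only: a device was just detected on the root port. */
	max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION |
				    (USB_PORT_STAT_C_CONNECTION << 16);

	/* hub_status_data() can later test whether anything is left to report: */
	bool change_pending = max3421_hcd->port_status & PORT_C_MASK;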
+
+struct max3421_ep {
+ struct usb_host_endpoint *ep;
+ struct list_head ep_list;
+ u32 naks;
+ u16 last_active; /* frame # this ep was last active */
+ enum pkt_state pkt_state;
+ u8 retries;
+ u8 retransmit; /* packet needs retransmission */
+};
+
+static struct max3421_hcd *max3421_hcd_list;
+
+#define MAX3421_FIFO_SIZE 64
+
+#define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */
+#define MAX3421_SPI_DIR_WR 1 /* write register to MAX3421 */
+
+/* SPI commands: */
+#define MAX3421_SPI_DIR_SHIFT 1
+#define MAX3421_SPI_REG_SHIFT 3
+
+#define MAX3421_REG_RCVFIFO 1
+#define MAX3421_REG_SNDFIFO 2
+#define MAX3421_REG_SUDFIFO 4
+#define MAX3421_REG_RCVBC 6
+#define MAX3421_REG_SNDBC 7
+#define MAX3421_REG_USBIRQ 13
+#define MAX3421_REG_USBIEN 14
+#define MAX3421_REG_USBCTL 15
+#define MAX3421_REG_CPUCTL 16
+#define MAX3421_REG_PINCTL 17
+#define MAX3421_REG_REVISION 18
+#define MAX3421_REG_IOPINS1 20
+#define MAX3421_REG_IOPINS2 21
+#define MAX3421_REG_GPINIRQ 22
+#define MAX3421_REG_GPINIEN 23
+#define MAX3421_REG_GPINPOL 24
+#define MAX3421_REG_HIRQ 25
+#define MAX3421_REG_HIEN 26
+#define MAX3421_REG_MODE 27
+#define MAX3421_REG_PERADDR 28
+#define MAX3421_REG_HCTL 29
+#define MAX3421_REG_HXFR 30
+#define MAX3421_REG_HRSL 31
+
+enum {
+ MAX3421_USBIRQ_OSCOKIRQ_BIT = 0,
+ MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5,
+ MAX3421_USBIRQ_VBUSIRQ_BIT
+};
+
+enum {
+ MAX3421_CPUCTL_IE_BIT = 0,
+ MAX3421_CPUCTL_PULSEWID0_BIT = 6,
+ MAX3421_CPUCTL_PULSEWID1_BIT
+};
+
+enum {
+ MAX3421_USBCTL_PWRDOWN_BIT = 4,
+ MAX3421_USBCTL_CHIPRES_BIT
+};
+
+enum {
+ MAX3421_PINCTL_GPXA_BIT = 0,
+ MAX3421_PINCTL_GPXB_BIT,
+ MAX3421_PINCTL_POSINT_BIT,
+ MAX3421_PINCTL_INTLEVEL_BIT,
+ MAX3421_PINCTL_FDUPSPI_BIT,
+ MAX3421_PINCTL_EP0INAK_BIT,
+ MAX3421_PINCTL_EP2INAK_BIT,
+ MAX3421_PINCTL_EP3INAK_BIT,
+};
+
+enum {
+ MAX3421_HI_BUSEVENT_BIT = 0, /* bus-reset/-resume */
+ MAX3421_HI_RWU_BIT, /* remote wakeup */
+ MAX3421_HI_RCVDAV_BIT, /* receive FIFO data available */
+ MAX3421_HI_SNDBAV_BIT, /* send buffer available */
+ MAX3421_HI_SUSDN_BIT, /* suspend operation done */
+ MAX3421_HI_CONDET_BIT, /* peripheral connect/disconnect */
+ MAX3421_HI_FRAME_BIT, /* frame generator */
+ MAX3421_HI_HXFRDN_BIT, /* host transfer done */
+};
+
+enum {
+ MAX3421_HCTL_BUSRST_BIT = 0,
+ MAX3421_HCTL_FRMRST_BIT,
+ MAX3421_HCTL_SAMPLEBUS_BIT,
+ MAX3421_HCTL_SIGRSM_BIT,
+ MAX3421_HCTL_RCVTOG0_BIT,
+ MAX3421_HCTL_RCVTOG1_BIT,
+ MAX3421_HCTL_SNDTOG0_BIT,
+ MAX3421_HCTL_SNDTOG1_BIT
+};
+
+enum {
+ MAX3421_MODE_HOST_BIT = 0,
+ MAX3421_MODE_LOWSPEED_BIT,
+ MAX3421_MODE_HUBPRE_BIT,
+ MAX3421_MODE_SOFKAENAB_BIT,
+ MAX3421_MODE_SEPIRQ_BIT,
+ MAX3421_MODE_DELAYISO_BIT,
+ MAX3421_MODE_DMPULLDN_BIT,
+ MAX3421_MODE_DPPULLDN_BIT
+};
+
+enum {
+ MAX3421_HRSL_OK = 0,
+ MAX3421_HRSL_BUSY,
+ MAX3421_HRSL_BADREQ,
+ MAX3421_HRSL_UNDEF,
+ MAX3421_HRSL_NAK,
+ MAX3421_HRSL_STALL,
+ MAX3421_HRSL_TOGERR,
+ MAX3421_HRSL_WRONGPID,
+ MAX3421_HRSL_BADBC,
+ MAX3421_HRSL_PIDERR,
+ MAX3421_HRSL_PKTERR,
+ MAX3421_HRSL_CRCERR,
+ MAX3421_HRSL_KERR,
+ MAX3421_HRSL_JERR,
+ MAX3421_HRSL_TIMEOUT,
+ MAX3421_HRSL_BABBLE,
+ MAX3421_HRSL_RESULT_MASK = 0xf,
+ MAX3421_HRSL_RCVTOGRD_BIT = 4,
+ MAX3421_HRSL_SNDTOGRD_BIT,
+ MAX3421_HRSL_KSTATUS_BIT,
+ MAX3421_HRSL_JSTATUS_BIT
+};
+
+/* Return same error-codes as ohci.h:cc_to_error: */
+static const int hrsl_to_error[] = {
+ [MAX3421_HRSL_OK] = 0,
+ [MAX3421_HRSL_BUSY] = -EINVAL,
+ [MAX3421_HRSL_BADREQ] = -EINVAL,
+ [MAX3421_HRSL_UNDEF] = -EINVAL,
+ [MAX3421_HRSL_NAK] = -EAGAIN,
+ [MAX3421_HRSL_STALL] = -EPIPE,
+ [MAX3421_HRSL_TOGERR] = -EILSEQ,
+ [MAX3421_HRSL_WRONGPID] = -EPROTO,
+ [MAX3421_HRSL_BADBC] = -EREMOTEIO,
+ [MAX3421_HRSL_PIDERR] = -EPROTO,
+ [MAX3421_HRSL_PKTERR] = -EPROTO,
+ [MAX3421_HRSL_CRCERR] = -EILSEQ,
+ [MAX3421_HRSL_KERR] = -EIO,
+ [MAX3421_HRSL_JERR] = -EIO,
+ [MAX3421_HRSL_TIMEOUT] = -ETIME,
+ [MAX3421_HRSL_BABBLE] = -EOVERFLOW
+};
+
+/*
+ * See http://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a
+ * reasonable overview of how control transfers use the IN/OUT
+ * tokens.
+ */
+#define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */
+#define MAX3421_HXFR_SETUP 0x10
+#define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep)) /* bulk or interrupt */
+#define MAX3421_HXFR_ISO_IN(ep) (0x40 | (ep))
+#define MAX3421_HXFR_ISO_OUT(ep) (0x60 | (ep))
+#define MAX3421_HXFR_HS_IN 0x80 /* handshake in */
+#define MAX3421_HXFR_HS_OUT 0xa0 /* handshake out */
+
+#define field(val, bit) ((val) << (bit))
+
+static inline s16
+frame_diff(u16 left, u16 right)
+{
+ return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1);
+}
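Because the hardware frame counter is only 11 bits wide, frame arithmetic has to wrap at USB_MAX_FRAME_NUMBER + 1; for example (numbers picked arbitrarily):

	/* frame 3 comes five frames after frame 0x7fe (2046): */
	s16 d = frame_diff(3, 0x7fe);	/* ((3 - 2046) mod 2048) == 5 */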
+
+static inline struct max3421_hcd *
+hcd_to_max3421(struct usb_hcd *hcd)
+{
+ return (struct max3421_hcd *) hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *
+max3421_to_hcd(struct max3421_hcd *max3421_hcd)
+{
+ return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv);
+}
+
+static u8
+spi_rd8(struct usb_hcd *hcd, unsigned int reg)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct spi_transfer transfer;
+ struct spi_message msg;
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
+
+ transfer.tx_buf = max3421_hcd->tx->data;
+ transfer.rx_buf = max3421_hcd->rx->data;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+
+ return max3421_hcd->rx->data[1];
+}
+
+static void
+spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_transfer transfer;
+ struct spi_message msg;
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
+ max3421_hcd->tx->data[1] = val;
+
+ transfer.tx_buf = max3421_hcd->tx->data;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+}
+
+static void
+spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
+
+ memset(transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
+ transfer[0].tx_buf = max3421_hcd->tx->data;
+ transfer[0].len = 1;
+
+ transfer[1].rx_buf = buf;
+ transfer[1].len = len;
+
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ spi_sync(spi, &msg);
+}
+
+static void
+spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
+
+ memset(transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ max3421_hcd->tx->data[0] =
+ (field(reg, MAX3421_SPI_REG_SHIFT) |
+ field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
+
+ transfer[0].tx_buf = max3421_hcd->tx->data;
+ transfer[0].len = 1;
+
+ transfer[1].tx_buf = buf;
+ transfer[1].len = len;
+
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ spi_sync(spi, &msg);
+}
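All four helpers above lead with the same one-byte command built from the register number and the direction bit; purely as an illustration, the command byte for "write the HXFR register" works out as:

	/* (30 << MAX3421_SPI_REG_SHIFT) | (1 << MAX3421_SPI_DIR_SHIFT) == 0xf2 */
	u8 cmd = field(MAX3421_REG_HXFR, MAX3421_SPI_REG_SHIFT) |
		 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT);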
+
+/*
+ * Figure out the correct setting for the LOWSPEED and HUBPRE mode
+ * bits. The HUBPRE bit needs to be set when MAX3421E operates at
+ * full speed, but it's talking to a low-speed device (i.e., through a
+ * hub). Setting that bit ensures that every low-speed packet is
+ * preceded by a full-speed PRE PID. Possible configurations:
+ *
+ * Hub speed: Device speed: => LOWSPEED bit: HUBPRE bit:
+ * FULL FULL => 0 0
+ * FULL LOW => 1 1
+ * LOW LOW => 1 0
+ * LOW FULL => 1 0
+ */
+static void
+max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode;
+
+ mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT);
+ mode_hubpre = BIT(MAX3421_MODE_HUBPRE_BIT);
+ if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) {
+ mode |= mode_lowspeed;
+ mode &= ~mode_hubpre;
+ } else if (dev->speed == USB_SPEED_LOW) {
+ mode |= mode_lowspeed | mode_hubpre;
+ } else {
+ mode &= ~(mode_lowspeed | mode_hubpre);
+ }
+ if (mode != max3421_hcd->mode) {
+ max3421_hcd->mode = mode;
+ spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+ }
+
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
+ int force_toggles)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int old_epnum, same_ep, rcvtog, sndtog;
+ struct usb_device *old_dev;
+ u8 hctl;
+
+ old_dev = max3421_hcd->loaded_dev;
+ old_epnum = max3421_hcd->loaded_epnum;
+
+ same_ep = (dev == old_dev && epnum == old_epnum);
+ if (same_ep && !force_toggles)
+ return;
+
+ if (old_dev && !same_ep) {
+		/* save the old end-point's toggles: */
+ u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+
+ rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+ sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+
+ /* no locking: HCD (i.e., we) own toggles, don't we? */
+ usb_settoggle(old_dev, old_epnum, 0, rcvtog);
+ usb_settoggle(old_dev, old_epnum, 1, sndtog);
+ }
+ /* setup new endpoint's toggle bits: */
+ rcvtog = usb_gettoggle(dev, epnum, 0);
+ sndtog = usb_gettoggle(dev, epnum, 1);
+ hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
+ BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
+
+ max3421_hcd->loaded_epnum = epnum;
+ spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
+
+ /*
+ * Note: devnum for one and the same device can change during
+ * address-assignment so it's best to just always load the
+ * address whenever the end-point changed/was forced.
+ */
+ max3421_hcd->loaded_dev = dev;
+ spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
+}
+
+static int
+max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb)
+{
+ spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8);
+ return MAX3421_HXFR_SETUP;
+}
+
+static int
+max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int epnum = usb_pipeendpoint(urb->pipe);
+
+ max3421_hcd->curr_len = 0;
+ max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT);
+ return MAX3421_HXFR_BULK_IN(epnum);
+}
+
+static int
+max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int epnum = usb_pipeendpoint(urb->pipe);
+ u32 max_packet;
+ void *src;
+
+ src = urb->transfer_buffer + urb->actual_length;
+
+ if (fast_retransmit) {
+ if (max3421_hcd->rev == 0x12) {
+ /* work around rev 0x12 bug: */
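+			/*
+			 * Rev 0x12 apparently will not resend the SNDFIFO
+			 * contents unless SNDBC is first cleared, the first
+			 * byte rewritten, and the byte count then restored.
+			 */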
+ spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
+ spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]);
+ spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
+ }
+ return MAX3421_HXFR_BULK_OUT(epnum);
+ }
+
+ max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+
+ if (max_packet > MAX3421_FIFO_SIZE) {
+ /*
+ * We do not support isochronous transfers at this
+ * time.
+ */
+ dev_err(&spi->dev,
+ "%s: packet-size of %u too big (limit is %u bytes)",
+ __func__, max_packet, MAX3421_FIFO_SIZE);
+ max3421_hcd->urb_done = -EMSGSIZE;
+ return -EMSGSIZE;
+ }
+ max3421_hcd->curr_len = min((urb->transfer_buffer_length -
+ urb->actual_length), max_packet);
+
+ spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len);
+ spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
+ return MAX3421_HXFR_BULK_OUT(epnum);
+}
+
+/*
+ * Issue the next host-transfer command.
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep;
+ int cmd = -EINVAL;
+
+ if (!urb)
+ return; /* nothing to do */
+
+ max3421_ep = urb->ep->hcpriv;
+
+ switch (max3421_ep->pkt_state) {
+ case PKT_STATE_SETUP:
+ cmd = max3421_ctrl_setup(hcd, urb);
+ break;
+
+ case PKT_STATE_TRANSFER:
+ if (usb_urb_dir_in(urb))
+ cmd = max3421_transfer_in(hcd, urb);
+ else
+ cmd = max3421_transfer_out(hcd, urb, fast_retransmit);
+ break;
+
+ case PKT_STATE_TERMINATE:
+ /*
+ * IN transfers are terminated with HS_OUT token,
+ * OUT transfers with HS_IN:
+ */
+ if (usb_urb_dir_in(urb))
+ cmd = MAX3421_HXFR_HS_OUT;
+ else
+ cmd = MAX3421_HXFR_HS_IN;
+ break;
+ }
+
+ if (cmd < 0)
+ return;
+
+ /* issue the command and wait for host-xfer-done interrupt: */
+
+ spi_wr8(hcd, MAX3421_REG_HXFR, cmd);
+ max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT);
+}
+
+/*
+ * Find the next URB to process and start its execution.
+ *
+ * At this time, we do not anticipate ever connecting a USB hub to the
+ * MAX3421 chip, so at most one USB device can be connected and we can use
+ * a simplistic scheduler: at the start of a frame, schedule all
+ * periodic transfers. Once that is done, use the remainder of the
+ * frame to process non-periodic (bulk & control) transfers.
+ *
+ * Preconditions:
+ * o Caller must NOT hold HCD spinlock.
+ * o max3421_hcd->curr_urb MUST BE NULL.
+ * o MAX3421E chip must be idle.
+ */
+static int
+max3421_select_and_start_urb(struct usb_hcd *hcd)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb, *curr_urb = NULL;
+ struct max3421_ep *max3421_ep;
+ int epnum, force_toggles = 0;
+ struct usb_host_endpoint *ep;
+ struct list_head *pos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
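+	/*
+	 * Make up to two passes over the endpoint list: periodic endpoints
+	 * (ISO/interrupt) first, then bulk/control, until an endpoint with
+	 * a transfer that is due in this frame is found.
+	 */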
+ for (;
+ max3421_hcd->sched_pass < SCHED_PASS_DONE;
+ ++max3421_hcd->sched_pass)
+ list_for_each(pos, &max3421_hcd->ep_list) {
+ urb = NULL;
+ max3421_ep = container_of(pos, struct max3421_ep,
+ ep_list);
+ ep = max3421_ep->ep;
+
+ switch (usb_endpoint_type(&ep->desc)) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ if (max3421_hcd->sched_pass !=
+ SCHED_PASS_PERIODIC)
+ continue;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ if (max3421_hcd->sched_pass !=
+ SCHED_PASS_NON_PERIODIC)
+ continue;
+ break;
+ }
+
+ if (list_empty(&ep->urb_list))
+ continue; /* nothing to do */
+ urb = list_first_entry(&ep->urb_list, struct urb,
+ urb_list);
+ if (urb->unlinked) {
+ dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+ __func__, urb, urb->unlinked);
+ max3421_hcd->curr_urb = urb;
+ max3421_hcd->urb_done = 1;
+ spin_unlock_irqrestore(&max3421_hcd->lock,
+ flags);
+ return 1;
+ }
+
+ switch (usb_endpoint_type(&ep->desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /*
+ * Allow one control transaction per
+ * frame per endpoint:
+ */
+ if (frame_diff(max3421_ep->last_active,
+ max3421_hcd->frame_number) == 0)
+ continue;
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ if (max3421_ep->retransmit
+ && (frame_diff(max3421_ep->last_active,
+ max3421_hcd->frame_number)
+ == 0))
+ /*
+ * We already tried this EP
+ * during this frame and got a
+ * NAK or error; wait for next frame
+ */
+ continue;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ if (frame_diff(max3421_hcd->frame_number,
+ max3421_ep->last_active)
+ < urb->interval)
+ /*
+ * We already processed this
+ * end-point in the current
+ * frame
+ */
+ continue;
+ break;
+ }
+
+ /* move current ep to tail: */
+ list_move_tail(pos, &max3421_hcd->ep_list);
+ curr_urb = urb;
+ goto done;
+ }
+done:
+ if (!curr_urb) {
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return 0;
+ }
+
+ urb = max3421_hcd->curr_urb = curr_urb;
+ epnum = usb_endpoint_num(&urb->ep->desc);
+ if (max3421_ep->retransmit)
+ /* restart (part of) a USB transaction: */
+ max3421_ep->retransmit = 0;
+ else {
+ /* start USB transaction: */
+ if (usb_endpoint_xfer_control(&ep->desc)) {
+ /*
+ * See USB 2.0 spec section 8.6.1
+ * Initialization via SETUP Token:
+ */
+ usb_settoggle(urb->dev, epnum, 0, 1);
+ usb_settoggle(urb->dev, epnum, 1, 1);
+ max3421_ep->pkt_state = PKT_STATE_SETUP;
+ force_toggles = 1;
+ } else
+ max3421_ep->pkt_state = PKT_STATE_TRANSFER;
+ }
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ max3421_ep->last_active = max3421_hcd->frame_number;
+ max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+ max3421_set_speed(hcd, urb->dev);
+ max3421_next_transfer(hcd, 0);
+ return 1;
+}
+
+/*
+ * Check all endpoints for URBs that got unlinked.
+ *
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_check_unlink(struct usb_hcd *hcd)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct list_head *pos, *upos, *next_upos;
+ struct max3421_ep *max3421_ep;
+ struct usb_host_endpoint *ep;
+ struct urb *urb;
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ list_for_each(pos, &max3421_hcd->ep_list) {
+ max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+ ep = max3421_ep->ep;
+ list_for_each_safe(upos, next_upos, &ep->urb_list) {
+ urb = container_of(upos, struct urb, urb_list);
+ if (urb->unlinked) {
+ retval = 1;
+ dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+ __func__, urb, urb->unlinked);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
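+				/*
+				 * usb_hcd_giveback_urb() must be called
+				 * without the HCD spinlock held:
+				 */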
+ spin_unlock_irqrestore(&max3421_hcd->lock,
+ flags);
+ usb_hcd_giveback_urb(hcd, urb, 0);
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_slow_retransmit(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep;
+
+ max3421_ep = urb->ep->hcpriv;
+ max3421_ep->retransmit = 1;
+ max3421_hcd->curr_urb = NULL;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_recv_data_available(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ size_t remaining, transfer_size;
+ u8 rcvbc;
+
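+	/*
+	 * RCVBC reports how many bytes the chip received into RCVFIFO;
+	 * clamp it to the FIFO size and to the space left in the URB
+	 * buffer.
+	 */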
+ rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC);
+
+ if (rcvbc > MAX3421_FIFO_SIZE)
+ rcvbc = MAX3421_FIFO_SIZE;
+ if (urb->actual_length >= urb->transfer_buffer_length)
+ remaining = 0;
+ else
+ remaining = urb->transfer_buffer_length - urb->actual_length;
+ transfer_size = rcvbc;
+ if (transfer_size > remaining)
+ transfer_size = remaining;
+ if (transfer_size > 0) {
+ void *dst = urb->transfer_buffer + urb->actual_length;
+
+ spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size);
+ urb->actual_length += transfer_size;
+ max3421_hcd->curr_len = transfer_size;
+ }
+
+ /* ack the RCVDAV irq now that the FIFO has been read: */
+ spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT));
+}
+
+static void
+max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep = urb->ep->hcpriv;
+ int switch_sndfifo;
+
+ /*
+ * If an OUT command results in any response other than OK
+ * (i.e., error or NAK), we have to perform a dummy-write to
+ * SNDBC so the FIFO gets switched back to us. Otherwise, we
+ * get out of sync with the SNDFIFO double buffer.
+ */
+ switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER &&
+ usb_urb_dir_out(urb));
+
+ switch (result_code) {
+ case MAX3421_HRSL_OK:
+ return; /* this shouldn't happen */
+
+ case MAX3421_HRSL_WRONGPID: /* received wrong PID */
+ case MAX3421_HRSL_BUSY: /* SIE busy */
+ case MAX3421_HRSL_BADREQ: /* bad val in HXFR */
+ case MAX3421_HRSL_UNDEF: /* reserved */
+ case MAX3421_HRSL_KERR: /* K-state instead of response */
+ case MAX3421_HRSL_JERR: /* J-state instead of response */
+ /*
+ * packet experienced an error that we cannot recover
+ * from; report error
+ */
+ max3421_hcd->urb_done = hrsl_to_error[result_code];
+ dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+ __func__, hrsl);
+ break;
+
+ case MAX3421_HRSL_TOGERR:
+ if (usb_urb_dir_in(urb))
+ ; /* don't do anything (device will switch toggle) */
+ else {
+ /* flip the send toggle bit: */
+ int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+
+ sndtog ^= 1;
+ spi_wr8(hcd, MAX3421_REG_HCTL,
+ BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
+ }
+ /* FALL THROUGH */
+ case MAX3421_HRSL_BADBC: /* bad byte count */
+ case MAX3421_HRSL_PIDERR: /* received PID is corrupted */
+ case MAX3421_HRSL_PKTERR: /* packet error (stuff, EOP) */
+ case MAX3421_HRSL_CRCERR: /* CRC error */
+ case MAX3421_HRSL_BABBLE: /* device talked too long */
+ case MAX3421_HRSL_TIMEOUT:
+ if (max3421_ep->retries++ < USB_MAX_RETRIES)
+ /* retry the packet again in the next frame */
+ max3421_slow_retransmit(hcd);
+ else {
+ /* Based on ohci.h cc_to_err[]: */
+ max3421_hcd->urb_done = hrsl_to_error[result_code];
+ dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+ __func__, hrsl);
+ }
+ break;
+
+ case MAX3421_HRSL_STALL:
+ dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+ __func__, hrsl);
+ max3421_hcd->urb_done = hrsl_to_error[result_code];
+ break;
+
+ case MAX3421_HRSL_NAK:
+ /*
+ * Device wasn't ready for data or has no data
+ * available: retry the packet again.
+ */
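+		/*
+		 * A fast retransmit reissues the command right away; after
+		 * NAK_MAX_FAST_RETRANSMITS consecutive NAKs the endpoint is
+		 * requeued and retried in a later frame by the scheduler.
+		 */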
+ if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) {
+ max3421_next_transfer(hcd, 1);
+ switch_sndfifo = 0;
+ } else
+ max3421_slow_retransmit(hcd);
+ break;
+ }
+ if (switch_sndfifo)
+ spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u32 max_packet;
+
+ if (urb->actual_length >= urb->transfer_buffer_length)
+ return 1; /* read is complete, so we're done */
+
+ /*
+ * USB 2.0 Section 5.3.2 Pipes: packets must be full size
+ * except for last one.
+ */
+ max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
+ if (max_packet > MAX3421_FIFO_SIZE) {
+ /*
+ * We do not support isochronous transfers at this
+ * time...
+ */
+ dev_err(&spi->dev,
+ "%s: packet-size of %u too big (limit is %u bytes)",
+ __func__, max_packet, MAX3421_FIFO_SIZE);
+ return -EINVAL;
+ }
+
+ if (max3421_hcd->curr_len < max_packet) {
+ if (urb->transfer_flags & URB_SHORT_NOT_OK) {
+ /*
+ * remaining > 0 and received an
+ * unexpected partial packet ->
+ * error
+ */
+ return -EREMOTEIO;
+ } else
+ /* short read, but it's OK */
+ return 1;
+ }
+ return 0; /* not done */
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ urb->actual_length += max3421_hcd->curr_len;
+ if (urb->actual_length < urb->transfer_buffer_length)
+ return 0;
+ if (urb->transfer_flags & URB_ZERO_PACKET) {
+ /*
+ * Some hardware needs a zero-size packet at the end
+ * of a bulk-out transfer if the last transfer was a
+		 * full-sized packet (i.e., such hardware uses a packet
+		 * shorter than max_packet as an indicator that the end
+		 * of the transfer has been reached).
+ */
+ u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+
+ if (max3421_hcd->curr_len == max_packet)
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_host_transfer_done(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct urb *urb = max3421_hcd->curr_urb;
+ struct max3421_ep *max3421_ep;
+ u8 result_code, hrsl;
+ int urb_done = 0;
+
+ max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
+ BIT(MAX3421_HI_RCVDAV_BIT));
+
+ hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+ result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
+
+#ifdef DEBUG
+ ++max3421_hcd->err_stat[result_code];
+#endif
+
+ max3421_ep = urb->ep->hcpriv;
+
+ if (unlikely(result_code != MAX3421_HRSL_OK)) {
+ max3421_handle_error(hcd, hrsl);
+ return;
+ }
+
+ max3421_ep->naks = 0;
+ max3421_ep->retries = 0;
+ switch (max3421_ep->pkt_state) {
+
+ case PKT_STATE_SETUP:
+ if (urb->transfer_buffer_length > 0)
+ max3421_ep->pkt_state = PKT_STATE_TRANSFER;
+ else
+ max3421_ep->pkt_state = PKT_STATE_TERMINATE;
+ break;
+
+ case PKT_STATE_TRANSFER:
+ if (usb_urb_dir_in(urb))
+ urb_done = max3421_transfer_in_done(hcd, urb);
+ else
+ urb_done = max3421_transfer_out_done(hcd, urb);
+ if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) {
+ /*
+ * We aren't really done - we still need to
+ * terminate the control transfer:
+ */
+ max3421_hcd->urb_done = urb_done = 0;
+ max3421_ep->pkt_state = PKT_STATE_TERMINATE;
+ }
+ break;
+
+ case PKT_STATE_TERMINATE:
+ urb_done = 1;
+ break;
+ }
+
+ if (urb_done)
+ max3421_hcd->urb_done = urb_done;
+ else
+ max3421_next_transfer(hcd, 0);
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_detect_conn(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned int jk, have_conn = 0;
+ u32 old_port_status, chg;
+ unsigned long flags;
+ u8 hrsl, mode;
+
+ hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+
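+	/* jk: bit 0 = JSTATUS, bit 1 = KSTATUS, as sampled from the bus */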
+ jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) |
+ (((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1));
+
+ mode = max3421_hcd->mode;
+
+ switch (jk) {
+ case 0x0: /* SE0: disconnect */
+ /*
+ * Turn off SOFKAENAB bit to avoid getting interrupt
+		 * every millisecond:
+ */
+ mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT);
+ break;
+
+ case 0x1: /* J=0,K=1: low-speed (in full-speed or vice versa) */
+ case 0x2: /* J=1,K=0: full-speed (in full-speed or vice versa) */
+ if (jk == 0x2)
+ /* need to switch to the other speed: */
+ mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT);
+ /* turn on SOFKAENAB bit: */
+ mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT);
+ have_conn = 1;
+ break;
+
+ case 0x3: /* illegal */
+ break;
+ }
+
+ max3421_hcd->mode = mode;
+ spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ old_port_status = max3421_hcd->port_status;
+ if (have_conn)
+ max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION;
+ else
+ max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION;
+ if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT))
+ max3421_hcd->port_status |= USB_PORT_STAT_LOW_SPEED;
+ else
+ max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED;
+ chg = (old_port_status ^ max3421_hcd->port_status);
+ max3421_hcd->port_status |= chg << 16;
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+static irqreturn_t
+max3421_irq_handler(int irq, void *dev_id)
+{
+ struct usb_hcd *hcd = dev_id;
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ if (max3421_hcd->spi_thread &&
+ max3421_hcd->spi_thread->state != TASK_RUNNING)
+ wake_up_process(max3421_hcd->spi_thread);
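+	/*
+	 * The interrupt is level-triggered (see IRQF_TRIGGER_LOW in
+	 * max3421_probe()), so mask it here and let the SPI thread
+	 * re-enable it once the chip's pending interrupts have been
+	 * serviced.
+	 */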
+ if (!max3421_hcd->do_enable_irq) {
+ max3421_hcd->do_enable_irq = 1;
+ disable_irq_nosync(spi->irq);
+ }
+ return IRQ_HANDLED;
+}
+
+#ifdef DEBUG
+
+static void
+dump_eps(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct max3421_ep *max3421_ep;
+ struct usb_host_endpoint *ep;
+ struct list_head *pos, *upos;
+ char ubuf[512], *dp, *end;
+ unsigned long flags;
+ struct urb *urb;
+ int epnum, ret;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ list_for_each(pos, &max3421_hcd->ep_list) {
+ max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+ ep = max3421_ep->ep;
+
+ dp = ubuf;
+ end = dp + sizeof(ubuf);
+ *dp = '\0';
+ list_for_each(upos, &ep->urb_list) {
+ urb = container_of(upos, struct urb, urb_list);
+ ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
+ usb_pipetype(urb->pipe),
+ usb_urb_dir_in(urb) ? "IN" : "OUT",
+ urb->actual_length,
+ urb->transfer_buffer_length);
+ if (ret < 0 || ret >= end - dp)
+ break; /* error or buffer full */
+ dp += ret;
+ }
+
+ epnum = usb_endpoint_num(&ep->desc);
+ pr_info("EP%0u %u lst %04u rtr %u nak %6u rxmt %u: %s\n",
+ epnum, max3421_ep->pkt_state, max3421_ep->last_active,
+ max3421_ep->retries, max3421_ep->naks,
+ max3421_ep->retransmit, ubuf);
+ }
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+#endif /* DEBUG */
+
+/* Return zero if no work was performed, 1 otherwise. */
+static int
+max3421_handle_irqs(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u32 chg, old_port_status;
+ unsigned long flags;
+ u8 hirq;
+
+ /*
+ * Read and ack pending interrupts (CPU must never
+ * clear SNDBAV directly and RCVDAV must be cleared by
+ * max3421_recv_data_available()!):
+ */
+ hirq = spi_rd8(hcd, MAX3421_REG_HIRQ);
+ hirq &= max3421_hcd->hien;
+ if (!hirq)
+ return 0;
+
+ spi_wr8(hcd, MAX3421_REG_HIRQ,
+ hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) |
+ BIT(MAX3421_HI_RCVDAV_BIT)));
+
+ if (hirq & BIT(MAX3421_HI_FRAME_BIT)) {
+ max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1)
+ & USB_MAX_FRAME_NUMBER);
+ max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
+ }
+
+ if (hirq & BIT(MAX3421_HI_RCVDAV_BIT))
+ max3421_recv_data_available(hcd);
+
+ if (hirq & BIT(MAX3421_HI_HXFRDN_BIT))
+ max3421_host_transfer_done(hcd);
+
+ if (hirq & BIT(MAX3421_HI_CONDET_BIT))
+ max3421_detect_conn(hcd);
+
+ /*
+ * Now process interrupts that may affect HCD state
+ * other than the end-points:
+ */
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ old_port_status = max3421_hcd->port_status;
+ if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) {
+ if (max3421_hcd->port_status & USB_PORT_STAT_RESET) {
+ /* BUSEVENT due to completion of Bus Reset */
+ max3421_hcd->port_status &= ~USB_PORT_STAT_RESET;
+ max3421_hcd->port_status |= USB_PORT_STAT_ENABLE;
+ } else {
+ /* BUSEVENT due to completion of Bus Resume */
+ pr_info("%s: BUSEVENT Bus Resume Done\n", __func__);
+ }
+ }
+ if (hirq & BIT(MAX3421_HI_RWU_BIT))
+ pr_info("%s: RWU\n", __func__);
+ if (hirq & BIT(MAX3421_HI_SUSDN_BIT))
+ pr_info("%s: SUSDN\n", __func__);
+
+ chg = (old_port_status ^ max3421_hcd->port_status);
+ max3421_hcd->port_status |= chg << 16;
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+#ifdef DEBUG
+ {
+ static unsigned long last_time;
+ char sbuf[16 * 16], *dp, *end;
+ int i;
+
+ if (jiffies - last_time > 5*HZ) {
+ dp = sbuf;
+ end = sbuf + sizeof(sbuf);
+ *dp = '\0';
+ for (i = 0; i < 16; ++i) {
+ int ret = snprintf(dp, end - dp, " %lu",
+ max3421_hcd->err_stat[i]);
+ if (ret < 0 || ret >= end - dp)
+ break; /* error or buffer full */
+ dp += ret;
+ }
+ pr_info("%s: hrsl_stats %s\n", __func__, sbuf);
+ memset(max3421_hcd->err_stat, 0,
+ sizeof(max3421_hcd->err_stat));
+ last_time = jiffies;
+
+ dump_eps(hcd);
+ }
+ }
+#endif
+ return 1;
+}
+
+static int
+max3421_reset_hcd(struct usb_hcd *hcd)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int timeout;
+
+ /* perform a chip reset and wait for OSCIRQ signal to appear: */
+ spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT));
+ /* clear reset: */
+ spi_wr8(hcd, MAX3421_REG_USBCTL, 0);
+ timeout = 1000;
+ while (1) {
+ if (spi_rd8(hcd, MAX3421_REG_USBIRQ)
+ & BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT))
+ break;
+ if (--timeout < 0) {
+ dev_err(&spi->dev,
+ "timed out waiting for oscillator OK signal");
+ return 1;
+ }
+ cond_resched();
+ }
+
+ /*
+ * Turn on host mode, automatic generation of SOF packets, and
+	 * enable pull-down resistors on DM/DP:
+ */
+ max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) |
+ BIT(MAX3421_MODE_SOFKAENAB_BIT) |
+ BIT(MAX3421_MODE_DMPULLDN_BIT) |
+ BIT(MAX3421_MODE_DPPULLDN_BIT));
+ spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+
+ /* reset frame-number: */
+ max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER;
+ spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT));
+
+ /* sample the state of the D+ and D- lines */
+ spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT));
+ max3421_detect_conn(hcd);
+
+ /* enable frame, connection-detected, and bus-event interrupts: */
+ max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) |
+ BIT(MAX3421_HI_CONDET_BIT) |
+ BIT(MAX3421_HI_BUSEVENT_BIT));
+ spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
+
+ /* enable interrupts: */
+ spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT));
+ return 1;
+}
+
+static int
+max3421_urb_done(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+ struct urb *urb;
+ int status;
+
+ status = max3421_hcd->urb_done;
+ max3421_hcd->urb_done = 0;
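+	/* a positive urb_done means success, a negative one carries an errno: */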
+ if (status > 0)
+ status = 0;
+ urb = max3421_hcd->curr_urb;
+ if (urb) {
+ max3421_hcd->curr_urb = NULL;
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ /* must be called without the HCD spinlock: */
+ usb_hcd_giveback_urb(hcd, urb, status);
+ }
+ return 1;
+}
+
+static int
+max3421_spi_thread(void *dev_id)
+{
+ struct usb_hcd *hcd = dev_id;
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ int i, i_worked = 1;
+
+ /* set full-duplex SPI mode, low-active interrupt pin: */
+ spi_wr8(hcd, MAX3421_REG_PINCTL,
+ (BIT(MAX3421_PINCTL_FDUPSPI_BIT) | /* full-duplex */
+ BIT(MAX3421_PINCTL_INTLEVEL_BIT))); /* low-active irq */
+
+ while (!kthread_should_stop()) {
+ max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION);
+ if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13)
+ break;
+ dev_err(&spi->dev, "bad rev 0x%02x", max3421_hcd->rev);
+ msleep(10000);
+ }
+ dev_info(&spi->dev, "rev 0x%x, SPI clk %dHz, bpw %u, irq %d\n",
+ max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word,
+ spi->irq);
+
+ while (!kthread_should_stop()) {
+ if (!i_worked) {
+ /*
+ * We'll be waiting for wakeups from the hard
+ * interrupt handler, so now is a good time to
+ * sync our hien with the chip:
+ */
+ spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (max3421_hcd->do_enable_irq) {
+ max3421_hcd->do_enable_irq = 0;
+ enable_irq(spi->irq);
+ }
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+
+ i_worked = 0;
+
+ if (max3421_hcd->urb_done)
+ i_worked |= max3421_urb_done(hcd);
+ else if (max3421_handle_irqs(hcd))
+ i_worked = 1;
+ else if (!max3421_hcd->curr_urb)
+ i_worked |= max3421_select_and_start_urb(hcd);
+
+ if (max3421_hcd->do_reset_hcd) {
+ /* reset the HCD: */
+ max3421_hcd->do_reset_hcd = 0;
+ i_worked |= max3421_reset_hcd(hcd);
+ }
+ if (max3421_hcd->do_reset_port) {
+ /* perform a USB bus reset: */
+ max3421_hcd->do_reset_port = 0;
+ spi_wr8(hcd, MAX3421_REG_HCTL,
+ BIT(MAX3421_HCTL_BUSRST_BIT));
+ i_worked = 1;
+ }
+ if (max3421_hcd->do_check_unlink) {
+ max3421_hcd->do_check_unlink = 0;
+ i_worked |= max3421_check_unlink(hcd);
+ }
+ if (max3421_hcd->do_iopin_update) {
+ /*
+ * IOPINS1/IOPINS2 do not auto-increment, so we can't
+ * use spi_wr_buf().
+ */
+ for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
+				u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1 + i);
+
+ val = ((val & 0xf0) |
+ (max3421_hcd->iopins[i] & 0x0f));
+ spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val);
+ max3421_hcd->iopins[i] = val;
+ }
+ max3421_hcd->do_iopin_update = 0;
+ i_worked = 1;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+ dev_info(&spi->dev, "SPI thread exiting");
+ return 0;
+}
+
+static int
+max3421_reset_port(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED);
+ max3421_hcd->do_reset_port = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+ return 0;
+}
+
+static int
+max3421_reset(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ hcd->self.sg_tablesize = 0;
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_FULL;
+ max3421_hcd->do_reset_hcd = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+ return 0;
+}
+
+static int
+max3421_start(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+ spin_lock_init(&max3421_hcd->lock);
+ max3421_hcd->rh_state = MAX3421_RH_RUNNING;
+
+ INIT_LIST_HEAD(&max3421_hcd->ep_list);
+
+ hcd->power_budget = POWER_BUDGET;
+ hcd->state = HC_STATE_RUNNING;
+ hcd->uses_new_polling = 1;
+ return 0;
+}
+
+static void
+max3421_stop(struct usb_hcd *hcd)
+{
+}
+
+static int
+max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct max3421_ep *max3421_ep;
+ unsigned long flags;
+ int retval;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ case PIPE_ISOCHRONOUS:
+ if (urb->interval < 0) {
+ dev_err(&spi->dev,
+ "%s: interval=%d for intr-/iso-pipe; expected > 0\n",
+ __func__, urb->interval);
+ return -EINVAL;
+ }
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ max3421_ep = urb->ep->hcpriv;
+ if (!max3421_ep) {
+ /* gets freed in max3421_endpoint_disable: */
+ max3421_ep = kzalloc(sizeof(struct max3421_ep), mem_flags);
+ if (!max3421_ep) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ max3421_ep->ep = urb->ep;
+ max3421_ep->last_active = max3421_hcd->frame_number;
+ urb->ep->hcpriv = max3421_ep;
+
+ list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list);
+ }
+
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval == 0) {
+ /* Since we added to the queue, restart scheduling: */
+ max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
+ wake_up_process(max3421_hcd->spi_thread);
+ }
+
+out:
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static int
+max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ /*
+ * This will set urb->unlinked which in turn causes the entry
+ * to be dropped at the next opportunity.
+ */
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval == 0) {
+ max3421_hcd->do_check_unlink = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+ }
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static void
+max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ if (ep->hcpriv) {
+ struct max3421_ep *max3421_ep = ep->hcpriv;
+
+ /* remove myself from the ep_list: */
+ if (!list_empty(&max3421_ep->ep_list))
+ list_del(&max3421_ep->ep_list);
+ kfree(max3421_ep);
+ ep->hcpriv = NULL;
+ }
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+static int
+max3421_get_frame_number(struct usb_hcd *hcd)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ return max3421_hcd->frame_number;
+}
+
+/*
+ * Should return a non-zero value when any port is undergoing a resume
+ * transition while the root hub is suspended.
+ */
+static int
+max3421_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ if (!HCD_HW_ACCESSIBLE(hcd))
+ goto done;
+
+ *buf = 0;
+ if ((max3421_hcd->port_status & PORT_C_MASK) != 0) {
+		*buf = (1 << 1); /* a change is pending on port 1 */
+ dev_dbg(hcd->self.controller,
+ "port status 0x%08x has changes\n",
+ max3421_hcd->port_status);
+ retval = 1;
+ if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+ }
+done:
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static inline void
+hub_descriptor(struct usb_hub_descriptor *desc)
+{
+ memset(desc, 0, sizeof(*desc));
+ /*
+ * See Table 11-13: Hub Descriptor in USB 2.0 spec.
+ */
+ desc->bDescriptorType = 0x29; /* hub descriptor */
+ desc->bDescLength = 9;
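+	/* 0x0001: individual (per-port) power switching */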
+ desc->wHubCharacteristics = cpu_to_le16(0x0001);
+ desc->bNbrPorts = 1;
+}
+
+/*
+ * Set the MAX3421E general-purpose output with number PIN_NUMBER to
+ * VALUE (0 or 1). PIN_NUMBER may be in the range 1-8. For
+ * any other value, this function acts as a no-op.
+ */
+static void
+max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
+{
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ u8 mask, idx;
+
+ --pin_number;
+ if (pin_number > 7)
+ return;
+
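+	/*
+	 * Only the low nibble of each iopins[] entry is written out (see
+	 * the do_iopin_update handling in max3421_spi_thread()), so pins
+	 * 1-4 map to bits 0-3 of IOPINS1 and pins 5-8 to bits 0-3 of
+	 * IOPINS2.
+	 */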
+	mask = 1u << (pin_number % 4);
+ idx = pin_number / 4;
+
+ if (value)
+ max3421_hcd->iopins[idx] |= mask;
+ else
+ max3421_hcd->iopins[idx] &= ~mask;
+ max3421_hcd->do_iopin_update = 1;
+ wake_up_process(max3421_hcd->spi_thread);
+}
+
+static int
+max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
+ char *buf, u16 length)
+{
+ struct spi_device *spi = to_spi_device(hcd->self.controller);
+ struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+ struct max3421_hcd_platform_data *pdata;
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ pdata = spi->dev.platform_data;
+
+ switch (type_req) {
+ case ClearHubFeature:
+ break;
+ case ClearPortFeature:
+ switch (value) {
+ case USB_PORT_FEAT_SUSPEND:
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hcd->self.controller, "power-off\n");
+ max3421_gpout_set_value(hcd, pdata->vbus_gpout,
+ !pdata->vbus_active_level);
+ /* FALLS THROUGH */
+ default:
+ max3421_hcd->port_status &= ~(1 << value);
+ }
+ break;
+ case GetHubDescriptor:
+ hub_descriptor((struct usb_hub_descriptor *) buf);
+ break;
+
+ case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+ case GetPortErrorCount:
+ case SetHubDepth:
+ /* USB3 only */
+ goto error;
+
+ case GetHubStatus:
+ *(__le32 *) buf = cpu_to_le32(0);
+ break;
+
+ case GetPortStatus:
+ if (index != 1) {
+ retval = -EPIPE;
+ goto error;
+ }
+ ((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status);
+ ((__le16 *) buf)[1] =
+ cpu_to_le16(max3421_hcd->port_status >> 16);
+ break;
+
+ case SetHubFeature:
+ retval = -EPIPE;
+ break;
+
+ case SetPortFeature:
+ switch (value) {
+ case USB_PORT_FEAT_LINK_STATE:
+ case USB_PORT_FEAT_U1_TIMEOUT:
+ case USB_PORT_FEAT_U2_TIMEOUT:
+ case USB_PORT_FEAT_BH_PORT_RESET:
+ goto error;
+ case USB_PORT_FEAT_SUSPEND:
+ if (max3421_hcd->active)
+ max3421_hcd->port_status |=
+ USB_PORT_STAT_SUSPEND;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hcd->self.controller, "power-on\n");
+ max3421_hcd->port_status |= USB_PORT_STAT_POWER;
+ max3421_gpout_set_value(hcd, pdata->vbus_gpout,
+ pdata->vbus_active_level);
+ break;
+ case USB_PORT_FEAT_RESET:
+ max3421_reset_port(hcd);
+ /* FALLS THROUGH */
+ default:
+ if ((max3421_hcd->port_status & USB_PORT_STAT_POWER)
+ != 0)
+ max3421_hcd->port_status |= (1 << value);
+ }
+ break;
+
+ default:
+ dev_dbg(hcd->self.controller,
+ "hub control req%04x v%04x i%04x l%d\n",
+ type_req, value, index, length);
+error: /* "protocol stall" on error */
+ retval = -EPIPE;
+ }
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+ return retval;
+}
+
+static int
+max3421_bus_suspend(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+static int
+max3421_bus_resume(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+/*
+ * The SPI driver already takes care of DMA-mapping/unmapping, so no
+ * reason to do it twice.
+ */
+static int
+max3421_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ return 0;
+}
+
+static void
+max3421_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+}
+
+static struct hc_driver max3421_hcd_desc = {
+ .description = "max3421",
+ .product_desc = DRIVER_DESC,
+ .hcd_priv_size = sizeof(struct max3421_hcd),
+ .flags = HCD_USB11,
+ .reset = max3421_reset,
+ .start = max3421_start,
+ .stop = max3421_stop,
+ .get_frame_number = max3421_get_frame_number,
+ .urb_enqueue = max3421_urb_enqueue,
+ .urb_dequeue = max3421_urb_dequeue,
+ .map_urb_for_dma = max3421_map_urb_for_dma,
+ .unmap_urb_for_dma = max3421_unmap_urb_for_dma,
+ .endpoint_disable = max3421_endpoint_disable,
+ .hub_status_data = max3421_hub_status_data,
+ .hub_control = max3421_hub_control,
+ .bus_suspend = max3421_bus_suspend,
+ .bus_resume = max3421_bus_resume,
+};
+
+static int
+max3421_probe(struct spi_device *spi)
+{
+ struct max3421_hcd *max3421_hcd;
+ struct usb_hcd *hcd = NULL;
+ int retval = -ENOMEM;
+
+ if (spi_setup(spi) < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI bus");
+ return -EFAULT;
+ }
+
+ hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
+ dev_name(&spi->dev));
+ if (!hcd) {
+ dev_err(&spi->dev, "failed to create HCD structure\n");
+ goto error;
+ }
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ max3421_hcd = hcd_to_max3421(hcd);
+ max3421_hcd->next = max3421_hcd_list;
+ max3421_hcd_list = max3421_hcd;
+ INIT_LIST_HEAD(&max3421_hcd->ep_list);
+
+ max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
+ if (!max3421_hcd->tx) {
+ dev_err(&spi->dev, "failed to kmalloc tx buffer\n");
+ goto error;
+ }
+ max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL);
+ if (!max3421_hcd->rx) {
+ dev_err(&spi->dev, "failed to kmalloc rx buffer\n");
+ goto error;
+ }
+
+ max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd,
+ "max3421_spi_thread");
+ if (max3421_hcd->spi_thread == ERR_PTR(-ENOMEM)) {
+ dev_err(&spi->dev,
+ "failed to create SPI thread (out of memory)\n");
+ goto error;
+ }
+
+ retval = usb_add_hcd(hcd, 0, 0);
+ if (retval) {
+ dev_err(&spi->dev, "failed to add HCD\n");
+ goto error;
+ }
+
+ retval = request_irq(spi->irq, max3421_irq_handler,
+ IRQF_TRIGGER_LOW, "max3421", hcd);
+ if (retval < 0) {
+ dev_err(&spi->dev, "failed to request irq %d\n", spi->irq);
+ goto error;
+ }
+ return 0;
+
+error:
+ if (hcd) {
+ kfree(max3421_hcd->tx);
+ kfree(max3421_hcd->rx);
+ if (max3421_hcd->spi_thread)
+ kthread_stop(max3421_hcd->spi_thread);
+ usb_put_hcd(hcd);
+ }
+ return retval;
+}
+
+static int
+max3421_remove(struct spi_device *spi)
+{
+ struct max3421_hcd *max3421_hcd = NULL, **prev;
+ struct usb_hcd *hcd = NULL;
+ unsigned long flags;
+
+ for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) {
+ max3421_hcd = *prev;
+ hcd = max3421_to_hcd(max3421_hcd);
+ if (hcd->self.controller == &spi->dev)
+ break;
+ }
+ if (!max3421_hcd) {
+ dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n",
+ spi);
+ return -ENODEV;
+ }
+
+ usb_remove_hcd(hcd);
+
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+ kthread_stop(max3421_hcd->spi_thread);
+ *prev = max3421_hcd->next;
+
+ spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+ free_irq(spi->irq, hcd);
+
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+static struct spi_driver max3421_driver = {
+ .probe = max3421_probe,
+ .remove = max3421_remove,
+ .driver = {
+ .name = "max3421-hcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_spi_driver(max3421_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Mosberger <davidm@egauge.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/octeon2-common.c b/drivers/usb/host/octeon2-common.c
new file mode 100644
index 00000000000..d9df423f3d1
--- /dev/null
+++ b/drivers/usb/host/octeon2-common.c
@@ -0,0 +1,200 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010, 2011 Cavium Networks
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-uctlx-defs.h>
+
+static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
+
+static int octeon2_usb_clock_start_cnt;
+
+void octeon2_usb_clocks_start(void)
+{
+ u64 div;
+ union cvmx_uctlx_if_ena if_ena;
+ union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+ union cvmx_uctlx_uphy_ctl_status uphy_ctl_status;
+ union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
+ int i;
+ unsigned long io_clk_64_to_ns;
+
+
+ mutex_lock(&octeon2_usb_clocks_mutex);
+
+ octeon2_usb_clock_start_cnt++;
+ if (octeon2_usb_clock_start_cnt != 1)
+ goto exit;
+
+ io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();
+
+ /*
+ * Step 1: Wait for voltages stable. That surely happened
+ * before starting the kernel.
+ *
+ * Step 2: Enable SCLK of UCTL by writing UCTL0_IF_ENA[EN] = 1
+ */
+ if_ena.u64 = 0;
+ if_ena.s.en = 1;
+ cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+
+ /* Step 3: Configure the reference clock, PHY, and HCLK */
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+
+ /*
+ * If the UCTL looks like it has already been started, skip
+	 * the initialization; otherwise bus errors result.
+ */
+ if (clk_rst_ctl.s.hrst)
+ goto end_clock;
+ /* 3a */
+ clk_rst_ctl.s.p_por = 1;
+ clk_rst_ctl.s.hrst = 0;
+ clk_rst_ctl.s.p_prst = 0;
+ clk_rst_ctl.s.h_clkdiv_rst = 0;
+ clk_rst_ctl.s.o_clkdiv_rst = 0;
+ clk_rst_ctl.s.h_clkdiv_en = 0;
+ clk_rst_ctl.s.o_clkdiv_en = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3b */
+ /* 12MHz crystal. */
+ clk_rst_ctl.s.p_refclk_sel = 0;
+ clk_rst_ctl.s.p_refclk_div = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3c */
+ div = octeon_get_io_clock_rate() / 130000000ull;
+
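+	/*
+	 * The H-clock divider apparently supports only the values 1-4, 6,
+	 * 8 and 12; clamp the computed ratio to one of those:
+	 */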
+ switch (div) {
+ case 0:
+ div = 1;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ break;
+ case 5:
+ div = 4;
+ break;
+ case 6:
+ case 7:
+ div = 6;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ div = 8;
+ break;
+ default:
+ div = 12;
+ break;
+ }
+ clk_rst_ctl.s.h_div = div;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+	/* Read it back, then enable the H-clock divider: */
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+ clk_rst_ctl.s.h_clkdiv_en = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* 3d */
+ clk_rst_ctl.s.h_clkdiv_rst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3e: delay 64 io clocks */
+ ndelay(io_clk_64_to_ns);
+
+ /*
+ * Step 4: Program the power-on reset field in the UCTL
+ * clock-reset-control register.
+ */
+ clk_rst_ctl.s.p_por = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 5: Wait 1 ms for the PHY clock to start. */
+ mdelay(1);
+
+ /*
+ * Step 6: Program the reset input from automatic test
+ * equipment field in the UPHY CSR
+ */
+ uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0));
+ uphy_ctl_status.s.ate_reset = 1;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
+
+ /* Step 7: Wait for at least 10ns. */
+ ndelay(10);
+
+ /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */
+ uphy_ctl_status.s.ate_reset = 0;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
+
+ /*
+ * Step 9: Wait for at least 20ns for UPHY to output PHY clock
+ * signals and OHCI_CLK48
+ */
+ ndelay(20);
+
+ /* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
+ /* 10a */
+ clk_rst_ctl.s.o_clkdiv_rst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 10b */
+ clk_rst_ctl.s.o_clkdiv_en = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 10c */
+ ndelay(io_clk_64_to_ns);
+
+ /*
+ * Step 11: Program the PHY reset field:
+ * UCTL0_CLK_RST_CTL[P_PRST] = 1
+ */
+ clk_rst_ctl.s.p_prst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+	/* Step 12: Wait 1 us. */
+ udelay(1);
+
+ /* Step 13: Program the HRESET_N field: UCTL0_CLK_RST_CTL[HRST] = 1 */
+ clk_rst_ctl.s.hrst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+end_clock:
+ /* Now we can set some other registers. */
+
+ for (i = 0; i <= 1; i++) {
+ port_ctl_status.u64 =
+ cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
+ /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
+ port_ctl_status.s.txvreftune = 15;
+ port_ctl_status.s.txrisetune = 1;
+ port_ctl_status.s.txpreemphasistune = 1;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
+ port_ctl_status.u64);
+ }
+
+ /* Set uSOF cycle period to 60,000 bits. */
+ cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
+exit:
+ mutex_unlock(&octeon2_usb_clocks_mutex);
+}
+EXPORT_SYMBOL(octeon2_usb_clocks_start);
+
+void octeon2_usb_clocks_stop(void)
+{
+ mutex_lock(&octeon2_usb_clocks_mutex);
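+	/* just drop the reference; the clocks themselves are left running */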
+ octeon2_usb_clock_start_cnt--;
+ mutex_unlock(&octeon2_usb_clocks_mutex);
+}
+EXPORT_SYMBOL(octeon2_usb_clocks_stop);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 4ed228a8994..e49eb4f90f5 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -13,20 +13,38 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <mach/hardware.h>
#include <asm/gpio.h>
-#include <mach/board.h>
#include <mach/cpu.h>
-#ifndef CONFIG_ARCH_AT91
-#error "CONFIG_ARCH_AT91 must be defined."
-#endif
+#include "ohci.h"
+
+#define valid_port(index) ((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
+#define at91_for_each_port(index) \
+ for ((index) = 0; (index) < AT91_MAX_USBH_PORTS; (index)++)
+
+/* interface, function and usb clocks; sometimes also an AHB clock */
+static struct clk *iclk, *fclk, *uclk, *hclk;
/* interface and function clocks; sometimes also an AHB clock */
-static struct clk *iclk, *fclk, *hclk;
+
+#define DRIVER_DESC "OHCI Atmel driver"
+
+static const char hcd_name[] = "ohci-atmel";
+
+static struct hc_driver __read_mostly ohci_at91_hc_driver;
static int clocked;
extern int usb_disabled(void);
@@ -35,19 +53,23 @@ extern int usb_disabled(void);
static void at91_start_clock(void)
{
- if (cpu_is_at91sam9261())
- clk_enable(hclk);
- clk_enable(iclk);
- clk_enable(fclk);
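+	/* the USB transceivers need the 48 MHz clock (see the suspend path) */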
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ clk_set_rate(uclk, 48000000);
+ clk_prepare_enable(uclk);
+ }
+ clk_prepare_enable(hclk);
+ clk_prepare_enable(iclk);
+ clk_prepare_enable(fclk);
clocked = 1;
}
static void at91_stop_clock(void)
{
- clk_disable(fclk);
- clk_disable(iclk);
- if (cpu_is_at91sam9261())
- clk_disable(hclk);
+ clk_disable_unprepare(fclk);
+ clk_disable_unprepare(iclk);
+ clk_disable_unprepare(hclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_disable_unprepare(uclk);
clocked = 0;
}
@@ -107,65 +129,80 @@ static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
static int usb_hcd_at91_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
+ struct at91_usbh_data *board;
+ struct ohci_hcd *ohci;
int retval;
struct usb_hcd *hcd = NULL;
-
- if (pdev->num_resources != 2) {
- pr_debug("hcd probe: invalid num_resources");
- return -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "hcd probe: missing memory resource\n");
+ return -ENXIO;
}
- if ((pdev->resource[0].flags != IORESOURCE_MEM)
- || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
- pr_debug("hcd probe: invalid resource type\n");
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_dbg(dev, "hcd probe: missing irq resource\n");
+ return irq;
}
- hcd = usb_create_hcd(driver, &pdev->dev, "at91");
+ hcd = usb_create_hcd(driver, dev, "at91");
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed\n");
- retval = -EBUSY;
- goto err1;
+ hcd->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hcd->regs)) {
+ retval = PTR_ERR(hcd->regs);
+ goto err;
}
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed\n");
- retval = -EIO;
- goto err2;
+ iclk = devm_clk_get(dev, "ohci_clk");
+ if (IS_ERR(iclk)) {
+ dev_err(dev, "failed to get ohci_clk\n");
+ retval = PTR_ERR(iclk);
+ goto err;
+ }
+ fclk = devm_clk_get(dev, "uhpck");
+ if (IS_ERR(fclk)) {
+ dev_err(dev, "failed to get uhpck\n");
+ retval = PTR_ERR(fclk);
+ goto err;
+ }
+ hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(hclk)) {
+ dev_err(dev, "failed to get hclk\n");
+ retval = PTR_ERR(hclk);
+ goto err;
+ }
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ uclk = devm_clk_get(dev, "usb_clk");
+ if (IS_ERR(uclk)) {
+ dev_err(dev, "failed to get uclk\n");
+ retval = PTR_ERR(uclk);
+ goto err;
+ }
}
- iclk = clk_get(&pdev->dev, "ohci_clk");
- fclk = clk_get(&pdev->dev, "uhpck");
- if (cpu_is_at91sam9261())
- hclk = clk_get(&pdev->dev, "hck0");
-
+ board = hcd->self.controller->platform_data;
+ ohci = hcd_to_ohci(hcd);
+ ohci->num_ports = board->ports;
at91_start_hc(pdev);
- ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED);
- if (retval == 0)
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval == 0) {
+ device_wakeup_enable(hcd->self.controller);
return retval;
+ }
/* Error handling */
at91_stop_hc(pdev);
- if (cpu_is_at91sam9261())
- clk_put(hclk);
- clk_put(fclk);
- clk_put(iclk);
-
- iounmap(hcd->regs);
-
- err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
- err1:
+ err:
usb_put_hcd(hcd);
return retval;
}
@@ -188,103 +225,347 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
{
usb_remove_hcd(hcd);
at91_stop_hc(pdev);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
+}
+
+/*-------------------------------------------------------------------------*/
+static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int enable)
+{
+ if (!valid_port(port))
+ return;
- if (cpu_is_at91sam9261())
- clk_put(hclk);
- clk_put(fclk);
- clk_put(iclk);
- fclk = iclk = hclk = NULL;
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
+ return;
- dev_set_drvdata(&pdev->dev, NULL);
+ gpio_set_value(pdata->vbus_pin[port],
+ pdata->vbus_pin_active_low[port] ^ enable);
}
-/*-------------------------------------------------------------------------*/
+static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
+{
+ if (!valid_port(port))
+ return -EINVAL;
+
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
+ return -EINVAL;
+
+ return gpio_get_value(pdata->vbus_pin[port]) ^
+ pdata->vbus_pin_active_low[port];
+}
-static int __devinit
-ohci_at91_start (struct usb_hcd *hcd)
+/*
+ * Update the status data from the hub with the over-current indicator change.
+ */
+static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
{
- struct at91_usbh_data *board = hcd->self.controller->platform_data;
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
+ struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
+ int length = ohci_hub_status_data(hcd, buf);
+ int port;
+
+ at91_for_each_port(port) {
+ if (pdata->overcurrent_changed[port]) {
+ if (!length)
+ length = 1;
+ buf[0] |= 1 << (port + 1);
+ }
+ }
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
+ return length;
+}
- ohci->num_ports = board->ports;
+/*
+ * Look at the control requests to the root hub and see if we need to override.
+ */
+static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller);
+ struct usb_hub_descriptor *desc;
+ int ret = -EINVAL;
+ u32 *data = (u32 *)buf;
+
+ dev_dbg(hcd->self.controller,
+ "ohci_at91_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
+ hcd, typeReq, wValue, wIndex, buf, wLength);
+
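+	/* hub requests use 1-based port numbers; our arrays are 0-based */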
+ wIndex--;
+
+ switch (typeReq) {
+ case SetPortFeature:
+ if (wValue == USB_PORT_FEAT_POWER) {
+ dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
+ if (valid_port(wIndex)) {
+ ohci_at91_usb_set_power(pdata, wIndex, 1);
+ ret = 0;
+ }
+
+ goto out;
+ }
+ break;
+
+ case ClearPortFeature:
+ switch (wValue) {
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(hcd->self.controller,
+ "ClearPortFeature: C_OVER_CURRENT\n");
+
+ if (valid_port(wIndex)) {
+ pdata->overcurrent_changed[wIndex] = 0;
+ pdata->overcurrent_status[wIndex] = 0;
+ }
+
+ goto out;
+
+ case USB_PORT_FEAT_OVER_CURRENT:
+ dev_dbg(hcd->self.controller,
+ "ClearPortFeature: OVER_CURRENT\n");
+
+ if (valid_port(wIndex))
+ pdata->overcurrent_status[wIndex] = 0;
+
+ goto out;
+
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(hcd->self.controller,
+ "ClearPortFeature: POWER\n");
+
+ if (valid_port(wIndex)) {
+ ohci_at91_usb_set_power(pdata, wIndex, 0);
+ return 0;
+ }
+ }
+ break;
+ }
+
+ ret = ohci_hub_control(hcd, typeReq, wValue, wIndex + 1, buf, wLength);
+ if (ret)
+ goto out;
+
+ switch (typeReq) {
+ case GetHubDescriptor:
+
+ /* update the hub's descriptor */
+
+ desc = (struct usb_hub_descriptor *)buf;
+
+ dev_dbg(hcd->self.controller, "wHubCharacteristics 0x%04x\n",
+ desc->wHubCharacteristics);
+
+ /* remove the old configurations for power-switching, and
+ * over-current protection, and insert our new configuration
+ */
+
+ desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
+ desc->wHubCharacteristics |= cpu_to_le16(0x0001);
+
+ if (pdata->overcurrent_supported) {
+ desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
+ desc->wHubCharacteristics |= cpu_to_le16(0x0008|0x0001);
+ }
+
+ dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
+ desc->wHubCharacteristics);
- if ((ret = ohci_run(ohci)) < 0) {
- err("can't start %s", hcd->self.bus_name);
- ohci_stop(hcd);
return ret;
+
+ case GetPortStatus:
+ /* check port status */
+
+ dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
+
+ if (valid_port(wIndex)) {
+ if (!ohci_at91_usb_get_power(pdata, wIndex))
+ *data &= ~cpu_to_le32(RH_PS_PPS);
+
+ if (pdata->overcurrent_changed[wIndex])
+ *data |= cpu_to_le32(RH_PS_OCIC);
+
+ if (pdata->overcurrent_status[wIndex])
+ *data |= cpu_to_le32(RH_PS_POCI);
+ }
}
- return 0;
+
+ out:
+ return ret;
}
/*-------------------------------------------------------------------------*/
-static const struct hc_driver ohci_at91_hc_driver = {
- .description = hcd_name,
- .product_desc = "AT91 OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
+static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
+ int val, gpio, port;
+
+ /* From the GPIO notifying the over-current situation, find
+ * out the corresponding port */
+ at91_for_each_port(port) {
+ if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
+ gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
+ gpio = pdata->overcurrent_pin[port];
+ break;
+ }
+ }
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
+ if (port == AT91_MAX_USBH_PORTS) {
+		dev_err(&pdev->dev, "overcurrent interrupt from unknown GPIO\n");
+ return IRQ_HANDLED;
+ }
- /*
- * basic lifecycle operations
- */
- .start = ohci_at91_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
+ val = gpio_get_value(gpio);
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
+ /* When notified of an over-current situation, disable power
+ on the corresponding port, and mark this port in
+ over-current. */
+ if (!val) {
+ ohci_at91_usb_set_power(pdata, port, 0);
+ pdata->overcurrent_status[port] = 1;
+ pdata->overcurrent_changed[port] = 1;
+ }
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
+	dev_dbg(&pdev->dev, "overcurrent situation %s\n",
+ val ? "exited" : "notified");
- /*
- * root hub support
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id at91_ohci_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-ohci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
+
+static int ohci_at91_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int i, gpio, ret;
+ enum of_gpio_flags flags;
+ struct at91_usbh_data *pdata;
+ u32 ports;
+
+ if (!np)
+ return 0;
+
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
*/
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (!of_property_read_u32(np, "num-ports", &ports))
+ pdata->ports = ports;
+
+ at91_for_each_port(i) {
+ gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i, &flags);
+ pdata->vbus_pin[i] = gpio;
+ if (!gpio_is_valid(gpio))
+ continue;
+ pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW;
+ }
+
+ at91_for_each_port(i)
+ pdata->overcurrent_pin[i] =
+ of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+#else
+static int ohci_at91_of_init(struct platform_device *pdev)
+{
+ return 0;
+}
#endif
- .start_port_reset = ohci_start_port_reset,
-};
/*-------------------------------------------------------------------------*/
static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
{
- struct at91_usbh_data *pdata = pdev->dev.platform_data;
+ struct at91_usbh_data *pdata;
int i;
+ int gpio;
+ int ret;
+
+ ret = ohci_at91_of_init(pdev);
+ if (ret)
+ return ret;
+
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
- /* REVISIT make the driver support per-port power switching,
- * and also overcurrent detection. Here we assume the ports
- * are always powered while this driver is active, and use
- * active-low power switches.
- */
- for (i = 0; i < pdata->ports; i++) {
- if (pdata->vbus_pin[i] <= 0)
+ at91_for_each_port(i) {
+ /*
+ * do not configure PIO if not in relation with
+ * real USB port on board
+ */
+ if (i >= pdata->ports) {
+ pdata->vbus_pin[i] = -EINVAL;
+ pdata->overcurrent_pin[i] = -EINVAL;
+ break;
+ }
+
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
+ continue;
+ gpio = pdata->vbus_pin[i];
+
+ ret = gpio_request(gpio, "ohci_vbus");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't request vbus gpio %d\n", gpio);
continue;
- gpio_request(pdata->vbus_pin[i], "ohci_vbus");
- gpio_direction_output(pdata->vbus_pin[i], 0);
+ }
+ ret = gpio_direction_output(gpio,
+ !pdata->vbus_pin_active_low[i]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't put vbus gpio %d as output %d\n",
+ gpio, !pdata->vbus_pin_active_low[i]);
+ gpio_free(gpio);
+ continue;
+ }
+
+ ohci_at91_usb_set_power(pdata, i, 1);
+ }
+
+ at91_for_each_port(i) {
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
+ continue;
+ gpio = pdata->overcurrent_pin[i];
+
+ ret = gpio_request(gpio, "ohci_overcurrent");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't request overcurrent gpio %d\n",
+ gpio);
+ continue;
+ }
+
+ ret = gpio_direction_input(gpio);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't configure overcurrent gpio %d as input\n",
+ gpio);
+ gpio_free(gpio);
+ continue;
+ }
+
+ ret = request_irq(gpio_to_irq(gpio),
+ ohci_hcd_at91_overcurrent_irq,
+ IRQF_SHARED, "ohci_overcurrent", pdev);
+ if (ret) {
+ gpio_free(gpio);
+ dev_err(&pdev->dev,
+ "can't get gpio IRQ for overcurrent\n");
+ }
}
}
@@ -294,16 +575,23 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
{
- struct at91_usbh_data *pdata = pdev->dev.platform_data;
+ struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
int i;
if (pdata) {
- for (i = 0; i < pdata->ports; i++) {
- if (pdata->vbus_pin[i] <= 0)
+ at91_for_each_port(i) {
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
continue;
- gpio_direction_output(pdata->vbus_pin[i], 1);
+ ohci_at91_usb_set_power(pdata, i, 0);
gpio_free(pdata->vbus_pin[i]);
}
+
+ at91_for_each_port(i) {
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
+ continue;
+ free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
+ gpio_free(pdata->overcurrent_pin[i]);
+ }
}
device_init_wakeup(&pdev->dev, 0);
@@ -318,10 +606,17 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
- if (device_may_wakeup(&pdev->dev))
+ if (do_wakeup)
enable_irq_wake(hcd->irq);
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret) {
+ disable_irq_wake(hcd->irq);
+ return ret;
+ }
/*
* The integrated transceivers seem unable to notice disconnect,
* reconnect, or wakeup without the 48 MHz clock active. so for
@@ -330,11 +625,17 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
* REVISIT: some boards will be able to turn VBUS off...
*/
if (at91_suspend_entering_slow_clock()) {
- ohci_usb_reset (ohci);
+ ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+ ohci->hc_control &= OHCI_CTRL_RWC;
+ ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
+ ohci->rh_state = OHCI_RH_HALTED;
+
+ /* flush the writes */
+ (void) ohci_readl (ohci, &ohci->regs->control);
at91_stop_clock();
}
- return 0;
+ return ret;
}
static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
@@ -347,7 +648,7 @@ static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
if (!clocked)
at91_start_clock();
- ohci_finish_controller_resume(hcd);
+ ohci_resume(hcd, false);
return 0;
}
#else
@@ -355,8 +656,6 @@ static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
#define ohci_hcd_at91_drv_resume NULL
#endif
-MODULE_ALIAS("platform:at91_ohci");
-
static struct platform_driver ohci_hcd_at91_driver = {
.probe = ohci_hcd_at91_drv_probe,
.remove = ohci_hcd_at91_drv_remove,
@@ -366,5 +665,40 @@ static struct platform_driver ohci_hcd_at91_driver = {
.driver = {
.name = "at91_ohci",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91_ohci_dt_ids),
},
};
+
+static int __init ohci_at91_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ohci_init_driver(&ohci_at91_hc_driver, NULL);
+
+ /*
+	 * The Atmel HW has some unusual quirks that require Atmel-specific
+	 * workarounds. We override certain hc_driver functions here to
+	 * implement them. We deliberately do not extend ohci_driver_overrides
+	 * to make such overrides easier, since this is an unusual case and we
+	 * don't want to encourage other drivers to override these functions.
+ */
+
+ ohci_at91_hc_driver.hub_status_data = ohci_at91_hub_status_data;
+ ohci_at91_hc_driver.hub_control = ohci_at91_hub_control;
+
+ return platform_driver_register(&ohci_hcd_at91_driver);
+}
+module_init(ohci_at91_init);
+
+static void __exit ohci_at91_cleanup(void)
+{
+ platform_driver_unregister(&ohci_hcd_at91_driver);
+}
+module_exit(ohci_at91_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_ohci");
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
deleted file mode 100644
index 2ac4e022a13..00000000000
--- a/drivers/usb/host/ohci-au1xxx.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- * (C) Copyright 2002 Hewlett-Packard Company
- *
- * Bus Glue for AMD Alchemy Au1xxx
- *
- * Written by Christopher Hoover <ch@hpl.hp.com>
- * Based on fragments of previous driver by Russell King et al.
- *
- * Modified for LH7A404 from ohci-sa1111.c
- * by Durgesh Pattamatta <pattamattad@sharpsec.com>
- * Modified for AMD Alchemy Au1xxx
- * by Matt Porter <mporter@kernel.crashing.org>
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/platform_device.h>
-#include <linux/signal.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-#ifndef CONFIG_SOC_AU1200
-
-#define USBH_ENABLE_BE (1<<0)
-#define USBH_ENABLE_C (1<<1)
-#define USBH_ENABLE_E (1<<2)
-#define USBH_ENABLE_CE (1<<3)
-#define USBH_ENABLE_RD (1<<4)
-
-#ifdef __LITTLE_ENDIAN
-#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C)
-#elif __BIG_ENDIAN
-#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \
- USBH_ENABLE_BE)
-#else
-#error not byte order defined
-#endif
-
-#else /* Au1200 */
-
-#define USB_HOST_CONFIG (USB_MSR_BASE + USB_MSR_MCFG)
-#define USB_MCFG_PFEN (1<<31)
-#define USB_MCFG_RDCOMB (1<<30)
-#define USB_MCFG_SSDEN (1<<23)
-#define USB_MCFG_OHCCLKEN (1<<16)
-#ifdef CONFIG_DMA_COHERENT
-#define USB_MCFG_UCAM (1<<7)
-#else
-#define USB_MCFG_UCAM (0)
-#endif
-#define USB_MCFG_OBMEN (1<<1)
-#define USB_MCFG_OMEMEN (1<<0)
-
-#define USBH_ENABLE_CE USB_MCFG_OHCCLKEN
-
-#define USBH_ENABLE_INIT (USB_MCFG_PFEN | USB_MCFG_RDCOMB | \
- USBH_ENABLE_CE | USB_MCFG_SSDEN | \
- USB_MCFG_UCAM | \
- USB_MCFG_OBMEN | USB_MCFG_OMEMEN)
-
-#define USBH_DISABLE (USB_MCFG_OBMEN | USB_MCFG_OMEMEN)
-
-#endif /* Au1200 */
-
-extern int usb_disabled(void);
-
-static void au1xxx_start_ohc(void)
-{
- /* enable host controller */
-#ifndef CONFIG_SOC_AU1200
- au_writel(USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- /* wait for reset complete (read register twice; see au1500 errata) */
- while (au_readl(USB_HOST_CONFIG),
- !(au_readl(USB_HOST_CONFIG) & USBH_ENABLE_RD))
- udelay(1000);
-
-#else /* Au1200 */
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-
- au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG);
- au_sync();
- udelay(2000);
-#endif /* Au1200 */
-}
-
-static void au1xxx_stop_ohc(void)
-{
-#ifdef CONFIG_SOC_AU1200
- /* Disable mem */
- au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_DISABLE, USB_HOST_CONFIG);
- au_sync();
- udelay(1000);
-#endif
- /* Disable clock */
- au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_ENABLE_CE, USB_HOST_CONFIG);
- au_sync();
-}
-
-static int __devinit ohci_au1xxx_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- ohci_dbg(ohci, "ohci_au1xxx_start, ohci:%p", ohci);
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- err ("can't start %s", hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
-}
-
-static const struct hc_driver ohci_au1xxx_hc_driver = {
- .description = hcd_name,
- .product_desc = "Au1xxx OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_au1xxx_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
-{
- int ret;
- struct usb_hcd *hcd;
-
- if (usb_disabled())
- return -ENODEV;
-
-#if defined(CONFIG_SOC_AU1200) && defined(CONFIG_DMA_COHERENT)
- /* Au1200 AB USB does not support coherent memory */
- if (!(read_c0_prid() & 0xff)) {
- printk(KERN_INFO "%s: this is chip revision AB !!\n",
- pdev->name);
- printk(KERN_INFO "%s: update your board or re-configure "
- "the kernel\n", pdev->name);
- return -ENODEV;
- }
-#endif
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ\n");
- return -ENOMEM;
- }
-
- hcd = usb_create_hcd(&ohci_au1xxx_hc_driver, &pdev->dev, "au1xxx");
- if (!hcd)
- return -ENOMEM;
-
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug("request_mem_region failed\n");
- ret = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- pr_debug("ioremap failed\n");
- ret = -ENOMEM;
- goto err2;
- }
-
- au1xxx_start_ohc();
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_DISABLED | IRQF_SHARED);
- if (ret == 0) {
- platform_set_drvdata(pdev, hcd);
- return ret;
- }
-
- au1xxx_stop_ohc();
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err1:
- usb_put_hcd(hcd);
- return ret;
-}
-
-static int ohci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- au1xxx_stop_ohc();
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ohci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
- pm_message_t message)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- unsigned long flags;
- int rc;
-
- rc = 0;
-
- /* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
- */
- spin_lock_irqsave(&ohci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
- ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
- (void)ohci_readl(ohci, &ohci->regs->intrdisable);
-
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW)
- ohci_usb_reset(ohci);
-
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- au1xxx_stop_ohc();
-bail:
- spin_unlock_irqrestore(&ohci->lock, flags);
-
- return rc;
-}
-
-static int ohci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- au1xxx_start_ohc();
-
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- ohci_finish_controller_resume(hcd);
-
- return 0;
-}
-#else
-#define ohci_hcd_au1xxx_drv_suspend NULL
-#define ohci_hcd_au1xxx_drv_resume NULL
-#endif
-
-static struct platform_driver ohci_hcd_au1xxx_driver = {
- .probe = ohci_hcd_au1xxx_drv_probe,
- .remove = ohci_hcd_au1xxx_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .suspend = ohci_hcd_au1xxx_drv_suspend,
- .resume = ohci_hcd_au1xxx_drv_resume,
- .driver = {
- .name = "au1xxx-ohci",
- .owner = THIS_MODULE,
- },
-};
-
-MODULE_ALIAS("platform:au1xxx-ohci");
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
new file mode 100644
index 00000000000..df06be6b47f
--- /dev/null
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -0,0 +1,442 @@
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * TI DA8xx (OMAP-L1x) Bus Glue
+ *
+ * Derived from: ohci-omap.c and ohci-s3c2410.c
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <mach/da8xx.h>
+#include <linux/platform_data/usb-davinci.h>
+
+#ifndef CONFIG_ARCH_DAVINCI_DA8XX
+#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
+#endif
+
+#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)
+
+static struct clk *usb11_clk;
+static struct clk *usb20_clk;
+
+/* Over-current indicator change bitmask */
+static volatile u16 ocic_mask;
+
+static void ohci_da8xx_clock(int on)
+{
+ u32 cfgchip2;
+
+ cfgchip2 = __raw_readl(CFGCHIP2);
+ if (on) {
+ clk_enable(usb11_clk);
+
+ /*
+ * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we
+ * need to enable the USB 2.0 module clocking, start its PHY,
+ * and not allow it to stop the clock during USB 2.0 suspend.
+ */
+ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
+ clk_enable(usb20_clk);
+
+ cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
+ cfgchip2 |= CFGCHIP2_PHY_PLLON;
+ __raw_writel(cfgchip2, CFGCHIP2);
+
+ pr_info("Waiting for USB PHY clock good...\n");
+ while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
+ cpu_relax();
+ }
+
+ /* Enable USB 1.1 PHY */
+ cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
+ } else {
+ clk_disable(usb11_clk);
+ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
+ clk_disable(usb20_clk);
+
+ /* Disable USB 1.1 PHY */
+ cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
+ }
+ __raw_writel(cfgchip2, CFGCHIP2);
+}
+
+/*
+ * Handle the port over-current indicator change.
+ */
+static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
+ unsigned port)
+{
+ ocic_mask |= 1 << port;
+
+ /* Once over-current is detected, the port needs to be powered down */
+ if (hub->get_oci(port) > 0)
+ hub->set_power(port, 0);
+}
+
+static int ohci_da8xx_init(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int result;
+ u32 rh_a;
+
+ dev_dbg(dev, "starting USB controller\n");
+
+ ohci_da8xx_clock(1);
+
+ /*
+	 * DA8xx only has 1 port connected to the pins, but the HC root hub
+ * register A reports 2 ports, thus we'll have to override it...
+ */
+ ohci->num_ports = 1;
+
+ result = ohci_init(ohci);
+ if (result < 0)
+ return result;
+
+ /*
+ * Since we're providing a board-specific root hub port power control
+ * and over-current reporting, we have to override the HC root hub A
+	 * register's default value, so that ohci_hub_control() can return
+ * the correct hub descriptor...
+ */
+ rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
+ if (hub->set_power) {
+ rh_a &= ~RH_A_NPS;
+ rh_a |= RH_A_PSM;
+ }
+ if (hub->get_oci) {
+ rh_a &= ~RH_A_NOCP;
+ rh_a |= RH_A_OCPM;
+ }
+ rh_a &= ~RH_A_POTPGT;
+ rh_a |= hub->potpgt << 24;
+ ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
+
+ return result;
+}
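The potpgt value written above lands in bits 24-31 of roothub register A, which are in 2 ms units (see the POTPGT note in the ohci-hcd.c hunk further down). Assuming the platform-data field is already expressed in those units, a hypothetical helper for converting a desired power-on-to-power-good time would be:

/* Sketch: convert a power-on-to-power-good time in ms to the 8-bit
 * POTPGT field (2 ms units); callers must stay within 510 ms. */
static inline u8 potpgt_from_ms(unsigned int ms)
{
	return DIV_ROUND_UP(ms, 2);	/* e.g. 100 ms -> 50 */
}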
+
+static void ohci_da8xx_stop(struct usb_hcd *hcd)
+{
+ ohci_stop(hcd);
+ ohci_da8xx_clock(0);
+}
+
+static int ohci_da8xx_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int result;
+
+ result = ohci_run(ohci);
+ if (result < 0)
+ ohci_da8xx_stop(hcd);
+
+ return result;
+}
+
+/*
+ * Update the status data from the hub with the over-current indicator change.
+ */
+static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ int length = ohci_hub_status_data(hcd, buf);
+
+ /* See if we have OCIC bit set on port 1 */
+ if (ocic_mask & (1 << 1)) {
+ dev_dbg(hcd->self.controller, "over-current indicator change "
+ "on port 1\n");
+
+ if (!length)
+ length = 1;
+
+ buf[0] |= 1 << 1;
+ }
+ return length;
+}
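buf follows the standard hub status-change bitmap layout: bit 0 is reserved for the hub itself and bit N reports port N, which is why port 1 maps to bit 1 above. Generalized to any port number, the same operation is simply:

/* Sketch: flag a status change for 'port' (1-based) in a hub
 * status-change bitmap where bit 0 belongs to the hub. */
static inline void hub_change_set_port(char *buf, unsigned int port)
{
	buf[port / 8] |= 1 << (port % 8);
}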
+
+/*
+ * Look at the control requests to the root hub and see if we need to override.
+ */
+static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ int temp;
+
+ switch (typeReq) {
+ case GetPortStatus:
+ /* Check the port number */
+ if (wIndex != 1)
+ break;
+
+ dev_dbg(dev, "GetPortStatus(%u)\n", wIndex);
+
+ temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
+
+ /* The port power status (PPS) bit defaults to 1 */
+ if (hub->get_power && hub->get_power(wIndex) == 0)
+ temp &= ~RH_PS_PPS;
+
+ /* The port over-current indicator (POCI) bit is always 0 */
+ if (hub->get_oci && hub->get_oci(wIndex) > 0)
+ temp |= RH_PS_POCI;
+
+ /* The over-current indicator change (OCIC) bit is 0 too */
+ if (ocic_mask & (1 << wIndex))
+ temp |= RH_PS_OCIC;
+
+ put_unaligned(cpu_to_le32(temp), (__le32 *)buf);
+ return 0;
+ case SetPortFeature:
+ temp = 1;
+ goto check_port;
+ case ClearPortFeature:
+ temp = 0;
+
+check_port:
+ /* Check the port number */
+ if (wIndex != 1)
+ break;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(dev, "%sPortFeature(%u): %s\n",
+ temp ? "Set" : "Clear", wIndex, "POWER");
+
+ if (!hub->set_power)
+ return -EPIPE;
+
+ return hub->set_power(wIndex, temp) ? -EPIPE : 0;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(dev, "%sPortFeature(%u): %s\n",
+ temp ? "Set" : "Clear", wIndex,
+ "C_OVER_CURRENT");
+
+ if (temp)
+ ocic_mask |= 1 << wIndex;
+ else
+ ocic_mask &= ~(1 << wIndex);
+ return 0;
+ }
+ }
+
+ return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+}
+
+static const struct hc_driver ohci_da8xx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "DA8xx OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_da8xx_init,
+ .start = ohci_da8xx_start,
+ .stop = ohci_da8xx_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_da8xx_hub_status_data,
+ .hub_control = ohci_da8xx_hub_control,
+
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+
+
+/**
+ * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
+{
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
+ struct usb_hcd *hcd;
+ struct resource *mem;
+ int error, irq;
+
+ if (hub == NULL)
+ return -ENODEV;
+
+ usb11_clk = devm_clk_get(&pdev->dev, "usb11");
+ if (IS_ERR(usb11_clk))
+ return PTR_ERR(usb11_clk);
+
+ usb20_clk = devm_clk_get(&pdev->dev, "usb20");
+ if (IS_ERR(usb20_clk))
+ return PTR_ERR(usb20_clk);
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+ hcd->rsrc_start = mem->start;
+ hcd->rsrc_len = resource_size(mem);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(hcd->regs)) {
+ error = PTR_ERR(hcd->regs);
+ goto err;
+ }
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ error = -ENODEV;
+ goto err;
+ }
+ error = usb_add_hcd(hcd, irq, 0);
+ if (error)
+ goto err;
+
+ device_wakeup_enable(hcd->self.controller);
+
+ if (hub->ocic_notify) {
+ error = hub->ocic_notify(ohci_da8xx_ocic_handler);
+ if (!error)
+ return 0;
+ }
+
+ usb_remove_hcd(hcd);
+err:
+ usb_put_hcd(hcd);
+ return error;
+}
+
+/**
+ * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
+ * @hcd: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static inline void
+usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+{
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
+
+ hub->ocic_notify(NULL);
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+}
+
+static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
+{
+ return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
+}
+
+static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ usb_hcd_da8xx_remove(hcd, dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ohci_da8xx_suspend(struct platform_device *pdev,
+ pm_message_t message)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(&pdev->dev);
+ int ret;
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ret = ohci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+
+ ohci_da8xx_clock(0);
+ hcd->state = HC_STATE_SUSPENDED;
+
+ return ret;
+}
+
+static int ohci_da8xx_resume(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ohci_da8xx_clock(1);
+ dev->dev.power.power_state = PMSG_ON;
+ usb_hcd_resume_root_hub(hcd);
+ return 0;
+}
+#endif
+
+/*
+ * Driver definition to register with platform structure.
+ */
+static struct platform_driver ohci_hcd_da8xx_driver = {
+ .probe = ohci_hcd_da8xx_drv_probe,
+ .remove = ohci_hcd_da8xx_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ohci_da8xx_suspend,
+ .resume = ohci_da8xx_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ohci",
+ },
+};
+
+MODULE_ALIAS("platform:ohci");
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d3269656aa4..45032e933e1 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -9,68 +9,15 @@
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-
#define edstring(ed_type) ({ char *temp; \
switch (ed_type) { \
case PIPE_CONTROL: temp = "ctrl"; break; \
case PIPE_BULK: temp = "bulk"; break; \
case PIPE_INTERRUPT: temp = "intr"; break; \
default: temp = "isoc"; break; \
- }; temp;})
+ } temp;})
#define pipestring(pipe) edstring(usb_pipetype(pipe))
-/* debug| print the main components of an URB
- * small: 0) header + data packets 1) just header
- */
-static void __maybe_unused
-urb_print(struct urb * urb, char * str, int small, int status)
-{
- unsigned int pipe= urb->pipe;
-
- if (!urb->dev || !urb->dev->bus) {
- dbg("%s URB: no dev", str);
- return;
- }
-
-#ifndef OHCI_VERBOSE_DEBUG
- if (status != 0)
-#endif
- dbg("%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d stat=%d",
- str,
- urb,
- usb_pipedevice (pipe),
- usb_pipeendpoint (pipe),
- usb_pipeout (pipe)? "out" : "in",
- pipestring (pipe),
- urb->transfer_flags,
- urb->actual_length,
- urb->transfer_buffer_length,
- status);
-
-#ifdef OHCI_VERBOSE_DEBUG
- if (!small) {
- int i, len;
-
- if (usb_pipecontrol (pipe)) {
- printk (KERN_DEBUG __FILE__ ": setup(8):");
- for (i = 0; i < 8 ; i++)
- printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
- printk ("\n");
- }
- if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
- printk (KERN_DEBUG __FILE__ ": data(%d/%d):",
- urb->actual_length,
- urb->transfer_buffer_length);
- len = usb_pipeout (pipe)?
- urb->transfer_buffer_length: urb->actual_length;
- for (i = 0; i < 16 && i < len; i++)
- printk (" %02x", ((__u8 *) urb->transfer_buffer) [i]);
- printk ("%s stat:%d\n", i < len? "...": "", status);
- }
- }
-#endif
-}
#define ohci_dbg_sw(ohci, next, size, format, arg...) \
do { \
@@ -82,6 +29,14 @@ urb_print(struct urb * urb, char * str, int small, int status)
ohci_dbg(ohci,format, ## arg ); \
} while (0);
+/* Version for use where "next" is the address of a local variable */
+#define ohci_dbg_nosw(ohci, next, size, format, arg...) \
+ do { \
+ unsigned s_len; \
+ s_len = scnprintf(*next, *size, format, ## arg); \
+ *size -= s_len; *next += s_len; \
+ } while (0);
+
static void ohci_dump_intr_mask (
struct ohci_hcd *ohci,
@@ -127,6 +82,19 @@ static char *hcfs2string (int state)
return "?";
}
+static const char *rh_state_string(struct ohci_hcd *ohci)
+{
+ switch (ohci->rh_state) {
+ case OHCI_RH_HALTED:
+ return "halted";
+ case OHCI_RH_SUSPENDED:
+ return "suspended";
+ case OHCI_RH_RUNNING:
+ return "running";
+ }
+ return "?";
+}
+
// dump control and status registers
static void
ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
@@ -136,9 +104,10 @@ ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
temp = ohci_readl (controller, &regs->revision) & 0xff;
ohci_dbg_sw (controller, next, size,
- "OHCI %d.%d, %s legacy support registers\n",
+ "OHCI %d.%d, %s legacy support registers, rh state %s\n",
0x03 & (temp >> 4), (temp & 0x0f),
- (temp & 0x0100) ? "with" : "NO");
+ (temp & 0x0100) ? "with" : "NO",
+ rh_state_string(controller));
temp = ohci_readl (controller, &regs->control);
ohci_dbg_sw (controller, next, size,
@@ -385,22 +354,8 @@ ohci_dump_ed (const struct ohci_hcd *ohci, const char *label,
}
}
-#else
-static inline void ohci_dump (struct ohci_hcd *controller, int verbose) {}
-
-#undef OHCI_VERBOSE_DEBUG
-
-#endif /* DEBUG */
-
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILES
-
-static inline void create_debug_files (struct ohci_hcd *bus) { }
-static inline void remove_debug_files (struct ohci_hcd *bus) { }
-
-#else
-
static int debug_async_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
@@ -413,25 +368,28 @@ static const struct file_operations debug_async_fops = {
.open = debug_async_open,
.read = debug_output,
.release = debug_close,
+ .llseek = default_llseek,
};
static const struct file_operations debug_periodic_fops = {
.owner = THIS_MODULE,
.open = debug_periodic_open,
.read = debug_output,
.release = debug_close,
+ .llseek = default_llseek,
};
static const struct file_operations debug_registers_fops = {
.owner = THIS_MODULE,
.open = debug_registers_open,
.read = debug_output,
.release = debug_close,
+ .llseek = default_llseek,
};
static struct dentry *ohci_debug_root;
struct debug_buffer {
ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
- struct device *dev;
+ struct ohci_hcd *ohci;
struct mutex mutex; /* protect filling of buffer */
size_t count; /* number of characters filled into buffer */
char *page;
@@ -505,15 +463,11 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
static ssize_t fill_async_buffer(struct debug_buffer *buf)
{
- struct usb_bus *bus;
- struct usb_hcd *hcd;
struct ohci_hcd *ohci;
size_t temp;
unsigned long flags;
- bus = dev_get_drvdata(buf->dev);
- hcd = bus_to_hcd(bus);
- ohci = hcd_to_ohci(hcd);
+ ohci = buf->ohci;
/* display control and bulk lists together, for simplicity */
spin_lock_irqsave (&ohci->lock, flags);
@@ -529,8 +483,6 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
- struct usb_bus *bus;
- struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct ed **seen, *ed;
unsigned long flags;
@@ -542,9 +494,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
return 0;
seen_count = 0;
- bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
- hcd = bus_to_hcd(bus);
- ohci = hcd_to_ohci(hcd);
+ ohci = buf->ohci;
next = buf->page;
size = PAGE_SIZE;
@@ -626,7 +576,6 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
- struct usb_bus *bus;
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct ohci_regs __iomem *regs;
@@ -635,9 +584,8 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
char *next;
u32 rdata;
- bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
- hcd = bus_to_hcd(bus);
- ohci = hcd_to_ohci(hcd);
+ ohci = buf->ohci;
+ hcd = ohci_to_hcd(ohci);
regs = ohci->regs;
next = buf->page;
size = PAGE_SIZE;
@@ -646,7 +594,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
/* dump driver info, then registers in spec order */
- ohci_dbg_sw (ohci, &next, &size,
+ ohci_dbg_nosw(ohci, &next, &size,
"bus %s, device %s\n"
"%s\n"
"%s\n",
@@ -655,7 +603,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
hcd->product_desc,
hcd_name);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
size -= scnprintf (next, size,
"SUSPENDED (no register access)\n");
goto done;
@@ -665,7 +613,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
/* hcca */
if (ohci->hcca)
- ohci_dbg_sw (ohci, &next, &size,
+ ohci_dbg_nosw(ohci, &next, &size,
"hcca frame 0x%04x\n", ohci_frame_no(ohci));
/* other registers mostly affect frame timings */
@@ -697,7 +645,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
next += temp;
temp = scnprintf (next, size, "hub poll timer %s\n",
- ohci_to_hcd(ohci)->poll_rh ? "ON" : "off");
+ HCD_POLL_RH(ohci_to_hcd(ohci)) ? "ON" : "off");
size -= temp;
next += temp;
@@ -710,7 +658,7 @@ done:
return PAGE_SIZE - size;
}
-static struct debug_buffer *alloc_buffer(struct device *dev,
+static struct debug_buffer *alloc_buffer(struct ohci_hcd *ohci,
ssize_t (*fill_func)(struct debug_buffer *))
{
struct debug_buffer *buf;
@@ -718,7 +666,7 @@ static struct debug_buffer *alloc_buffer(struct device *dev,
buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
if (buf) {
- buf->dev = dev;
+ buf->ohci = ohci;
buf->fill_func = fill_func;
mutex_init(&buf->mutex);
}
@@ -810,26 +758,25 @@ static int debug_registers_open(struct inode *inode, struct file *file)
static inline void create_debug_files (struct ohci_hcd *ohci)
{
struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
- struct device *dev = bus->dev;
ohci->debug_dir = debugfs_create_dir(bus->bus_name, ohci_debug_root);
if (!ohci->debug_dir)
goto dir_error;
ohci->debug_async = debugfs_create_file("async", S_IRUGO,
- ohci->debug_dir, dev,
+ ohci->debug_dir, ohci,
&debug_async_fops);
if (!ohci->debug_async)
goto async_error;
ohci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
- ohci->debug_dir, dev,
+ ohci->debug_dir, ohci,
&debug_periodic_fops);
if (!ohci->debug_periodic)
goto periodic_error;
ohci->debug_registers = debugfs_create_file("registers", S_IRUGO,
- ohci->debug_dir, dev,
+ ohci->debug_dir, ohci,
&debug_registers_fops);
if (!ohci->debug_registers)
goto registers_error;
@@ -857,7 +804,5 @@ static inline void remove_debug_files (struct ohci_hcd *ohci)
debugfs_remove(ohci->debug_dir);
}
-#endif
-
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
deleted file mode 100644
index fb3055f084b..00000000000
--- a/drivers/usb/host/ohci-ep93xx.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * OHCI HCD (Host Controller Driver) for USB.
- *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- * (C) Copyright 2002 Hewlett-Packard Company
- *
- * Bus Glue for ep93xx.
- *
- * Written by Christopher Hoover <ch@hpl.hp.com>
- * Based on fragments of previous driver by Russell King et al.
- *
- * Modified for LH7A404 from ohci-sa1111.c
- * by Durgesh Pattamatta <pattamattad@sharpsec.com>
- *
- * Modified for pxa27x from ohci-lh7a404.c
- * by Nick Bane <nick@cecomputing.co.uk> 26-8-2004
- *
- * Modified for ep93xx from ohci-pxa27x.c
- * by Lennert Buytenhek <buytenh@wantstofly.org> 28-2-2006
- * Based on an earlier driver by Ray Lehtiniemi
- *
- * This file is licenced under the GPL.
- */
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/signal.h>
-#include <linux/platform_device.h>
-
-#include <mach/hardware.h>
-
-static struct clk *usb_host_clock;
-
-static void ep93xx_start_hc(struct device *dev)
-{
- clk_enable(usb_host_clock);
-}
-
-static void ep93xx_stop_hc(struct device *dev)
-{
- clk_disable(usb_host_clock);
-}
-
-static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
-{
- int retval;
- struct usb_hcd *hcd;
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ");
- return -ENOMEM;
- }
-
- hcd = usb_create_hcd(driver, &pdev->dev, "ep93xx");
- if (hcd == NULL)
- return -ENOMEM;
-
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- usb_put_hcd(hcd);
- retval = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- pr_debug("ioremap failed");
- retval = -ENOMEM;
- goto err2;
- }
-
- usb_host_clock = clk_get(&pdev->dev, "usb_host");
- ep93xx_start_hc(&pdev->dev);
-
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED);
- if (retval == 0)
- return retval;
-
- ep93xx_stop_hc(&pdev->dev);
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err1:
- usb_put_hcd(hcd);
-
- return retval;
-}
-
-static void usb_hcd_ep93xx_remove(struct usb_hcd *hcd,
- struct platform_device *pdev)
-{
- usb_remove_hcd(hcd);
- ep93xx_stop_hc(&pdev->dev);
- clk_put(usb_host_clock);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-}
-
-static int __devinit ohci_ep93xx_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- err("can't start %s", hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
-}
-
-static struct hc_driver ohci_ep93xx_hc_driver = {
- .description = hcd_name,
- .product_desc = "EP93xx OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
- .start = ohci_ep93xx_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
- .get_frame_number = ohci_get_frame,
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-extern int usb_disabled(void);
-
-static int ohci_hcd_ep93xx_drv_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = -ENODEV;
- if (!usb_disabled())
- ret = usb_hcd_ep93xx_probe(&ohci_ep93xx_hc_driver, pdev);
-
- return ret;
-}
-
-static int ohci_hcd_ep93xx_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_hcd_ep93xx_remove(hcd, pdev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- if (time_before(jiffies, ohci->next_statechange))
- msleep(5);
- ohci->next_statechange = jiffies;
-
- ep93xx_stop_hc(&pdev->dev);
- hcd->state = HC_STATE_SUSPENDED;
-
- return 0;
-}
-
-static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int status;
-
- if (time_before(jiffies, ohci->next_statechange))
- msleep(5);
- ohci->next_statechange = jiffies;
-
- ep93xx_start_hc(&pdev->dev);
-
- ohci_finish_controller_resume(hcd);
- return 0;
-}
-#endif
-
-
-static struct platform_driver ohci_hcd_ep93xx_driver = {
- .probe = ohci_hcd_ep93xx_drv_probe,
- .remove = ohci_hcd_ep93xx_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
-#ifdef CONFIG_PM
- .suspend = ohci_hcd_ep93xx_drv_suspend,
- .resume = ohci_hcd_ep93xx_drv_resume,
-#endif
- .driver = {
- .name = "ep93xx-ohci",
- .owner = THIS_MODULE,
- },
-};
-
-MODULE_ALIAS("platform:ep93xx-ohci");
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
new file mode 100644
index 00000000000..060a6a41475
--- /dev/null
+++ b/drivers/usb/host/ohci-exynos.c
@@ -0,0 +1,363 @@
+/*
+ * SAMSUNG EXYNOS USB HOST OHCI Controller
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/samsung_usb_phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI EXYNOS driver"
+
+static const char hcd_name[] = "ohci-exynos";
+static struct hc_driver __read_mostly exynos_ohci_hc_driver;
+
+#define to_exynos_ohci(hcd) (struct exynos_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
+
+#define PHY_NUMBER 3
+
+struct exynos_ohci_hcd {
+ struct clk *clk;
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+ struct phy *phy_g[PHY_NUMBER];
+};
+
+static int exynos_ohci_get_phy(struct device *dev,
+ struct exynos_ohci_hcd *exynos_ohci)
+{
+ struct device_node *child;
+ struct phy *phy;
+ int phy_number;
+ int ret = 0;
+
+ exynos_ohci->phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(exynos_ohci->phy)) {
+ ret = PTR_ERR(exynos_ohci->phy);
+ if (ret != -ENXIO && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ } else {
+ exynos_ohci->otg = exynos_ohci->phy->otg;
+ }
+
+ /*
+ * Getting generic phy:
+	 * We keep both types of phys while transitioning OHCI to the
+	 * generic phy framework, so as to stay backward compatible with
+	 * old DTBs.
+	 * Before support for the old bindings can be removed from this
+	 * driver, we need to make sure that devices still using DTBs
+	 * built from the old DTS have them updated to ones built from
+	 * the new DTS.
+ */
+ for_each_available_child_of_node(dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &phy_number);
+ if (ret) {
+ dev_err(dev, "Failed to parse device tree\n");
+ of_node_put(child);
+ return ret;
+ }
+
+ if (phy_number >= PHY_NUMBER) {
+ dev_err(dev, "Invalid number of PHYs\n");
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ phy = devm_of_phy_get(dev, child, 0);
+ of_node_put(child);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ if (ret != -ENOSYS && ret != -ENODEV) {
+ dev_err(dev, "no usb2 phy configured\n");
+ return ret;
+ }
+ dev_dbg(dev, "Failed to get usb2 phy\n");
+ }
+ exynos_ohci->phy_g[phy_number] = phy;
+ }
+
+ return ret;
+}
+
+static int exynos_ohci_phy_enable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ int i;
+ int ret = 0;
+
+ if (!IS_ERR(exynos_ohci->phy))
+ return usb_phy_init(exynos_ohci->phy);
+
+ for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ohci->phy_g[i]))
+ ret = phy_power_on(exynos_ohci->phy_g[i]);
+ if (ret)
+ for (i--; i >= 0; i--)
+ if (!IS_ERR(exynos_ohci->phy_g[i]))
+ phy_power_off(exynos_ohci->phy_g[i]);
+
+ return ret;
+}
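The loop above is the usual enable-all-or-unwind pattern. Stripped of the per-phy IS_ERR() checks, a standalone sketch of the same idiom (hypothetical helper name, mirroring the rollback behaviour of the code above) would be:

/* Hypothetical helper: power on n phys, undoing on the first failure. */
static int phys_power_on_all(struct phy *phys[], int n)
{
	int i, ret = 0;

	for (i = 0; ret == 0 && i < n; i++)
		ret = phy_power_on(phys[i]);
	if (ret)
		while (--i >= 0)
			phy_power_off(phys[i]);
	return ret;
}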
+
+static void exynos_ohci_phy_disable(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ int i;
+
+ if (!IS_ERR(exynos_ohci->phy)) {
+ usb_phy_shutdown(exynos_ohci->phy);
+ return;
+ }
+
+ for (i = 0; i < PHY_NUMBER; i++)
+ if (!IS_ERR(exynos_ohci->phy_g[i]))
+ phy_power_off(exynos_ohci->phy_g[i]);
+}
+
+static int exynos_ohci_probe(struct platform_device *pdev)
+{
+ struct exynos_ohci_hcd *exynos_ohci;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq;
+ int err;
+
+ /*
+ * Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+	 * Once we move to full device tree support this can go away.
+ */
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ hcd = usb_create_hcd(&exynos_ohci_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ return -ENOMEM;
+ }
+
+ exynos_ohci = to_exynos_ohci(hcd);
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "samsung,exynos5440-ohci"))
+ goto skip_phy;
+
+ err = exynos_ohci_get_phy(&pdev->dev, exynos_ohci);
+ if (err)
+ goto fail_clk;
+
+skip_phy:
+ exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
+
+ if (IS_ERR(exynos_ohci->clk)) {
+ dev_err(&pdev->dev, "Failed to get usbhost clock\n");
+ err = PTR_ERR(exynos_ohci->clk);
+ goto fail_clk;
+ }
+
+ err = clk_prepare_enable(exynos_ohci->clk);
+ if (err)
+ goto fail_clk;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto fail_io;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto fail_io;
+ }
+
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
+
+ platform_set_drvdata(pdev, hcd);
+
+ err = exynos_ohci_phy_enable(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable USB phy\n");
+ goto fail_io;
+ }
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail_add_hcd;
+ }
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+
+fail_add_hcd:
+ exynos_ohci_phy_disable(&pdev->dev);
+fail_io:
+ clk_disable_unprepare(exynos_ohci->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+ return err;
+}
+
+static int exynos_ohci_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+
+ usb_remove_hcd(hcd);
+
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
+
+ exynos_ohci_phy_disable(&pdev->dev);
+
+ clk_disable_unprepare(exynos_ohci->clk);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static void exynos_ohci_shutdown(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+#ifdef CONFIG_PM
+static int exynos_ohci_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ bool do_wakeup = device_may_wakeup(dev);
+ int rc = ohci_suspend(hcd, do_wakeup);
+
+ if (rc)
+ return rc;
+
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
+
+ exynos_ohci_phy_disable(dev);
+
+ clk_disable_unprepare(exynos_ohci->clk);
+
+ return 0;
+}
+
+static int exynos_ohci_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+ int ret;
+
+ clk_prepare_enable(exynos_ohci->clk);
+
+ if (exynos_ohci->otg)
+ exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
+
+ ret = exynos_ohci_phy_enable(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable USB phy\n");
+ clk_disable_unprepare(exynos_ohci->clk);
+ return ret;
+ }
+
+ ohci_resume(hcd, false);
+
+ return 0;
+}
+#else
+#define exynos_ohci_suspend NULL
+#define exynos_ohci_resume NULL
+#endif
+
+static const struct ohci_driver_overrides exynos_overrides __initconst = {
+ .extra_priv_size = sizeof(struct exynos_ohci_hcd),
+};
+
+static const struct dev_pm_ops exynos_ohci_pm_ops = {
+ .suspend = exynos_ohci_suspend,
+ .resume = exynos_ohci_resume,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_ohci_match[] = {
+ { .compatible = "samsung,exynos4210-ohci" },
+ { .compatible = "samsung,exynos5440-ohci" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_ohci_match);
+#endif
+
+static struct platform_driver exynos_ohci_driver = {
+ .probe = exynos_ohci_probe,
+ .remove = exynos_ohci_remove,
+ .shutdown = exynos_ohci_shutdown,
+ .driver = {
+ .name = "exynos-ohci",
+ .owner = THIS_MODULE,
+ .pm = &exynos_ohci_pm_ops,
+ .of_match_table = of_match_ptr(exynos_ohci_match),
+ }
+};
+
+static int __init ohci_exynos_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ ohci_init_driver(&exynos_ohci_hc_driver, &exynos_overrides);
+ return platform_driver_register(&exynos_ohci_driver);
+}
+module_init(ohci_exynos_init);
+
+static void __exit ohci_exynos_cleanup(void)
+{
+ platform_driver_unregister(&exynos_ohci_driver);
+}
+module_exit(ohci_exynos_cleanup);
+
+MODULE_ALIAS("platform:exynos-ohci");
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 5cf5f1eca4f..f98d03f3144 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1,5 +1,7 @@
/*
- * OHCI HCD (Host Controller Driver) for USB.
+ * Open Host Controller Interface (OHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
@@ -32,27 +34,23 @@
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
+#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-#include <linux/reboot.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
-#include "../core/hcd.h"
#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
/*-------------------------------------------------------------------------*/
-#undef OHCI_VERBOSE_DEBUG /* not always helpful */
-
/* For initializing controller (mask in an HCFS mode too) */
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT \
@@ -76,30 +74,11 @@ static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
#include "ohci.h"
+#include "pci-quirks.h"
static void ohci_dump (struct ohci_hcd *ohci, int verbose);
-static int ohci_init (struct ohci_hcd *ohci);
static void ohci_stop (struct usb_hcd *hcd);
-#if defined(CONFIG_PM) || defined(CONFIG_PCI)
-static int ohci_restart (struct ohci_hcd *ohci);
-#endif
-
-#ifdef CONFIG_PCI
-static void quirk_amd_pll(int state);
-static void amd_iso_dev_put(void);
-#else
-static inline void quirk_amd_pll(int state)
-{
- return;
-}
-static inline void amd_iso_dev_put(void)
-{
- return;
-}
-#endif
-
-
#include "ohci-hub.c"
#include "ohci-dbg.c"
#include "ohci-mem.c"
@@ -118,13 +97,13 @@ static inline void amd_iso_dev_put(void)
/* Some boards misreport power switching/overcurrent */
-static int distrust_firmware = 1;
+static bool distrust_firmware = 1;
module_param (distrust_firmware, bool, 0);
MODULE_PARM_DESC (distrust_firmware,
"true to distrust firmware power/overcurrent setup");
/* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */
-static int no_handshake = 0;
+static bool no_handshake = 0;
module_param (no_handshake, bool, 0);
MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
@@ -146,10 +125,6 @@ static int ohci_urb_enqueue (
unsigned long flags;
int retval = 0;
-#ifdef OHCI_VERBOSE_DEBUG
- urb_print(urb, "SUB", usb_pipein(pipe), -EINPROGRESS);
-#endif
-
/* every endpoint has a ed, locate and maybe (re)initialize it */
if (! (ed = ed_get (ohci, urb->ep, urb->dev, pipe, urb->interval)))
return -ENOMEM;
@@ -167,7 +142,7 @@ static int ohci_urb_enqueue (
// case PIPE_INTERRUPT:
// case PIPE_BULK:
default:
- /* one TD for every 4096 Bytes (can be upto 8K) */
+ /* one TD for every 4096 Bytes (can be up to 8K) */
size += urb->transfer_buffer_length / 4096;
/* ... and for any remaining bytes ... */
if ((urb->transfer_buffer_length % 4096) != 0)
@@ -208,11 +183,11 @@ static int ohci_urb_enqueue (
spin_lock_irqsave (&ohci->lock, flags);
/* don't submit to a dead HC */
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
retval = -ENODEV;
goto fail;
}
- if (!HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
retval = -ENODEV;
goto fail;
}
@@ -235,13 +210,47 @@ static int ohci_urb_enqueue (
frame &= ~(ed->interval - 1);
frame |= ed->branch;
urb->start_frame = frame;
-
- /* yes, only URB_ISO_ASAP is supported, and
- * urb->start_frame is never used as input.
+ ed->last_iso = frame + ed->interval * (size - 1);
+ }
+ } else if (ed->type == PIPE_ISOCHRONOUS) {
+ u16 next = ohci_frame_no(ohci) + 1;
+ u16 frame = ed->last_iso + ed->interval;
+ u16 length = ed->interval * (size - 1);
+
+ /* Behind the scheduling threshold? */
+ if (unlikely(tick_before(frame, next))) {
+
+ /* URB_ISO_ASAP: Round up to the first available slot */
+ if (urb->transfer_flags & URB_ISO_ASAP) {
+ frame += (next - frame + ed->interval - 1) &
+ -ed->interval;
+
+ /*
+ * Not ASAP: Use the next slot in the stream,
+ * no matter what.
*/
+ } else {
+ /*
+ * Some OHCI hardware doesn't handle late TDs
+ * correctly. After retiring them it proceeds
+ * to the next ED instead of the next TD.
+ * Therefore we have to omit the late TDs
+ * entirely.
+ */
+ urb_priv->td_cnt = DIV_ROUND_UP(
+ (u16) (next - frame),
+ ed->interval);
+ if (urb_priv->td_cnt >= urb_priv->length) {
+ ++urb_priv->td_cnt; /* Mark it */
+ ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+ urb, frame, length,
+ next);
+ }
+ }
}
- } else if (ed->type == PIPE_ISOCHRONOUS)
- urb->start_frame = ed->last_iso + ed->interval;
+ urb->start_frame = frame;
+ ed->last_iso = frame + length;
+ }
/* fill the TDs and link them to the ed; and
* enable that part of the schedule, if needed
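The URB_ISO_ASAP branch above rounds the gap (next - frame) up to a multiple of the endpoint interval with the (delta + n - 1) & -n identity, which only works when n is a power of two (as the mask form already presumes); tick_before() is the usual wrap-safe comparison of 16-bit frame numbers, assumed here to be a signed-difference test. In isolation, the two helpers look like:

/* Sketch of the two operations the branch above relies on. */
static inline u16 iso_gap_roundup(u16 delta, u16 interval)
{
	return (delta + interval - 1) & -interval;	/* interval must be 2^k */
}

static inline int frame_tick_before(u16 t1, u16 t2)
{
	return (s16)(t1 - t2) < 0;	/* true when t1 precedes t2, modulo 2^16 */
}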
@@ -269,15 +278,11 @@ static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unsigned long flags;
int rc;
-#ifdef OHCI_VERBOSE_DEBUG
- urb_print(urb, "UNLINK", 1, status);
-#endif
-
spin_lock_irqsave (&ohci->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc) {
; /* Do nothing */
- } else if (HC_IS_RUNNING(hcd->state)) {
+ } else if (ohci->rh_state == OHCI_RH_RUNNING) {
urb_priv_t *urb_priv;
/* Unless an IRQ completed the unlink while it was being
@@ -324,7 +329,7 @@ ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
rescan:
spin_lock_irqsave (&ohci->lock, flags);
- if (!HC_IS_RUNNING (hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
sanitize:
ed->state = ED_IDLE;
if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
@@ -366,7 +371,6 @@ sanitize:
}
ep->hcpriv = NULL;
spin_unlock_irqrestore (&ohci->lock, flags);
- return;
}
static int ohci_get_frame (struct usb_hcd *hcd)
@@ -381,6 +385,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
ohci->hc_control &= OHCI_CTRL_RWC;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+ ohci->rh_state = OHCI_RH_HALTED;
}
/* ohci_shutdown forcibly disables IRQs and DMA, helping kexec and
@@ -393,10 +398,14 @@ ohci_shutdown (struct usb_hcd *hcd)
struct ohci_hcd *ohci;
ohci = hcd_to_ohci (hcd);
- ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
- ohci_usb_reset (ohci);
- /* flush the writes */
- (void) ohci_readl (ohci, &ohci->regs->control);
+ ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
+
+ /* Software reset, after which the controller goes into SUSPEND */
+ ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
+ ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */
+ udelay(10);
+
+ ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
}
static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
@@ -500,7 +509,7 @@ static int ohci_init (struct ohci_hcd *ohci)
if (distrust_firmware)
ohci->flags |= OHCI_QUIRK_HUB_POWER;
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci->regs = hcd->regs;
/* REVISIT this BIOS handshake is now moved into PCI "quirks", and
@@ -571,17 +580,17 @@ static int ohci_init (struct ohci_hcd *ohci)
*/
static int ohci_run (struct ohci_hcd *ohci)
{
- u32 mask, temp;
+ u32 mask, val;
int first = ohci->fminterval == 0;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* boot firmware should have set this up (5.1.1.3.1) */
if (first) {
- temp = ohci_readl (ohci, &ohci->regs->fminterval);
- ohci->fminterval = temp & 0x3fff;
+ val = ohci_readl (ohci, &ohci->regs->fminterval);
+ ohci->fminterval = val & 0x3fff;
if (ohci->fminterval != FI)
ohci_dbg (ohci, "fminterval delta %d\n",
ohci->fminterval - FI);
@@ -600,25 +609,25 @@ static int ohci_run (struct ohci_hcd *ohci)
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
- temp = 0;
+ val = 0;
break;
case OHCI_USB_SUSPEND:
case OHCI_USB_RESUME:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESUME;
- temp = 10 /* msec wait */;
+ val = 10 /* msec wait */;
break;
// case OHCI_USB_RESET:
default:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESET;
- temp = 50 /* msec wait */;
+ val = 50 /* msec wait */;
break;
}
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
// flush the writes
(void) ohci_readl (ohci, &ohci->regs->control);
- msleep(temp);
+ msleep(val);
memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
@@ -628,9 +637,9 @@ static int ohci_run (struct ohci_hcd *ohci)
retry:
/* HC Reset requires max 10 us delay */
ohci_writel (ohci, OHCI_HCR, &ohci->regs->cmdstatus);
- temp = 30; /* ... allow extra time */
+ val = 30; /* ... allow extra time */
while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
- if (--temp == 0) {
+ if (--val == 0) {
spin_unlock_irq (&ohci->lock);
ohci_err (ohci, "USB HC reset timed out!\n");
return -1;
@@ -681,14 +690,14 @@ retry:
}
/* use rhsc irqs after khubd is fully initialized */
- hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
hcd->uses_new_polling = 1;
/* start controller operations */
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
- hcd->state = HC_STATE_RUNNING;
+ ohci->rh_state = OHCI_RH_RUNNING;
/* wake on ConnectStatusChange, matching external hubs */
ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);
@@ -699,23 +708,23 @@ retry:
ohci_writel (ohci, mask, &ohci->regs->intrenable);
/* handle root hub init quirks ... */
- temp = roothub_a (ohci);
- temp &= ~(RH_A_PSM | RH_A_OCPM);
+ val = roothub_a (ohci);
+ val &= ~(RH_A_PSM | RH_A_OCPM);
if (ohci->flags & OHCI_QUIRK_SUPERIO) {
/* NSC 87560 and maybe others */
- temp |= RH_A_NOCP;
- temp &= ~(RH_A_POTPGT | RH_A_NPS);
- ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+ val |= RH_A_NOCP;
+ val &= ~(RH_A_POTPGT | RH_A_NPS);
+ ohci_writel (ohci, val, &ohci->regs->roothub.a);
} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
* Mac platforms. ganged overcurrent reporting, if any.
*/
- temp |= RH_A_NPS;
- ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+ val |= RH_A_NPS;
+ ohci_writel (ohci, val, &ohci->regs->roothub.a);
}
ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
- ohci_writel (ohci, (temp & RH_A_NPS) ? 0 : RH_B_PPCM,
+ ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
&ohci->regs->roothub.b);
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
@@ -724,8 +733,7 @@ retry:
spin_unlock_irq (&ohci->lock);
// POTPGT delay is bits 24-31, in 2 ms units.
- mdelay ((temp >> 23) & 0x1fe);
- hcd->state = HC_STATE_RUNNING;
+ mdelay ((val >> 23) & 0x1fe);
if (quirk_zfmicro(ohci)) {
/* Create timer to watch for bad queue state on ZF Micro */
@@ -741,6 +749,32 @@ retry:
return 0;
}
+/* ohci_setup routine for generic controller initialization */
+
+int ohci_setup(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ ohci_hcd_init(ohci);
+
+ return ohci_init(ohci);
+}
+EXPORT_SYMBOL_GPL(ohci_setup);
+
+/* ohci_start routine, the generic controller start shared by all OHCI bus glue */
+static int ohci_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ ohci_err(ohci, "can't start\n");
+ ohci_stop(hcd);
+ }
+ return ret;
+}
+
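For illustration only: a bus-glue driver that needs board-specific setup before the generic initialization could point its hc_driver .reset hook at a small wrapper around the newly exported ohci_setup(). The function name and the quirk chosen below are hypothetical, not part of this patch.

static int foo_ohci_reset(struct usb_hcd *hcd)
{
        struct ohci_hcd *ohci = hcd_to_ohci(hcd);

        /* assumed board-specific quirk, set before the generic init runs */
        ohci->flags |= OHCI_QUIRK_HUB_POWER;

        return ohci_setup(hcd);
}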
/*-------------------------------------------------------------------------*/
/* an interrupt happens */
@@ -761,8 +795,9 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
* of dead, unclocked, or unplugged (CardBus...) devices
*/
if (ints == ~(u32)0) {
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci_dbg (ohci, "device removed!\n");
+ usb_hc_died(hcd);
return IRQ_HANDLED;
}
@@ -770,7 +805,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
ints &= ohci_readl(ohci, &regs->intrenable);
/* interrupt for some other device? */
- if (ints == 0)
+ if (ints == 0 || unlikely(ohci->rh_state == OHCI_RH_HALTED))
return IRQ_NOTMINE;
if (ints & OHCI_INTR_UE) {
@@ -785,8 +820,9 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
schedule_work (&ohci->nec_work);
} else {
- disable (ohci);
ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
+ ohci->rh_state = OHCI_RH_HALTED;
+ usb_hc_died(hcd);
}
ohci_dump (ohci, 1);
@@ -794,7 +830,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
}
if (ints & OHCI_INTR_RHSC) {
- ohci_vdbg(ohci, "rhsc\n");
+ ohci_dbg(ohci, "rhsc\n");
ohci->next_statechange = jiffies + STATECHANGE_DELAY;
ohci_writel(ohci, OHCI_INTR_RD | OHCI_INTR_RHSC,
&regs->intrstatus);
@@ -816,9 +852,9 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
* this might not happen.
*/
else if (ints & OHCI_INTR_RD) {
- ohci_vdbg(ohci, "resume detect\n");
+ ohci_dbg(ohci, "resume detect\n");
ohci_writel(ohci, OHCI_INTR_RD, &regs->intrstatus);
- hcd->poll_rh = 1;
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
if (ohci->autostop) {
spin_lock (&ohci->lock);
ohci_rh_resume (ohci);
@@ -869,11 +905,11 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
if ((ints & OHCI_INTR_SF) != 0
&& !ohci->ed_rm_list
&& !ohci->ed_to_check
- && HC_IS_RUNNING(hcd->state))
+ && ohci->rh_state == OHCI_RH_RUNNING)
ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
spin_unlock (&ohci->lock);
- if (HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state == OHCI_RH_RUNNING) {
ohci_writel (ohci, ints, &regs->intrstatus);
ohci_writel (ohci, OHCI_INTR_MIE, &regs->intrenable);
// flush those writes
@@ -891,17 +927,18 @@ static void ohci_stop (struct usb_hcd *hcd)
ohci_dump (ohci, 1);
- flush_scheduled_work();
+ if (quirk_nec(ohci))
+ flush_work(&ohci->nec_work);
- ohci_usb_reset (ohci);
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+ ohci_usb_reset(ohci);
free_irq(hcd->irq, hcd);
- hcd->irq = -1;
+ hcd->irq = 0;
if (quirk_zfmicro(ohci))
del_timer(&ohci->unlink_watchdog);
if (quirk_amdiso(ohci))
- amd_iso_dev_put();
+ usb_amd_dev_put();
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
@@ -919,14 +956,15 @@ static void ohci_stop (struct usb_hcd *hcd)
#if defined(CONFIG_PM) || defined(CONFIG_PCI)
/* must not be called from interrupt context */
-static int ohci_restart (struct ohci_hcd *ohci)
+int ohci_restart(struct ohci_hcd *ohci)
{
int temp;
int i;
struct urb_priv *priv;
+ ohci_init(ohci);
spin_lock_irq(&ohci->lock);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* Recycle any "live" eds/tds (and urbs). */
if (!list_empty (&ohci->pending))
@@ -978,82 +1016,172 @@ static int ohci_restart (struct ohci_hcd *ohci)
ohci_dbg(ohci, "restart complete\n");
return 0;
}
+EXPORT_SYMBOL_GPL(ohci_restart);
#endif
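A rough sketch (not taken from this patch) of how bus glue might use the now-exported ohci_restart() when a controller loses state across a power transition; the helper name and the all-ones register check are assumptions.

static int foo_ohci_recover(struct usb_hcd *hcd)
{
        struct ohci_hcd *ohci = hcd_to_ohci(hcd);

        /* reads of ~0 usually mean the controller has dropped off the bus */
        if (ohci_readl(ohci, &ohci->regs->control) == ~(u32)0)
                return -ENODEV;

        /* reinitializes the controller and recycles any live EDs/TDs */
        return ohci_restart(ohci);
}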
-/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_PM
-MODULE_AUTHOR (DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE ("GPL");
+int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci (hcd);
+ unsigned long flags;
+ int rc = 0;
-#ifdef CONFIG_PCI
-#include "ohci-pci.c"
-#define PCI_DRIVER ohci_pci_driver
-#endif
+ /* Disable irq emission and mark HW inaccessible. Use
+ * the spinlock to properly synchronize with possible pending
+ * RH suspend or resume activity.
+ */
+ spin_lock_irqsave (&ohci->lock, flags);
+ ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+ (void)ohci_readl(ohci, &ohci->regs->intrdisable);
-#if defined(CONFIG_ARCH_SA1100) && defined(CONFIG_SA1111)
-#include "ohci-sa1111.c"
-#define SA1111_DRIVER ohci_hcd_sa1111_driver
-#endif
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ spin_unlock_irqrestore (&ohci->lock, flags);
-#ifdef CONFIG_ARCH_S3C2410
-#include "ohci-s3c2410.c"
-#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
-#endif
+ synchronize_irq(hcd->irq);
-#ifdef CONFIG_ARCH_OMAP
-#include "ohci-omap.c"
-#define PLATFORM_DRIVER ohci_hcd_omap_driver
-#endif
+ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+ ohci_resume(hcd, false);
+ rc = -EBUSY;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ohci_suspend);
-#ifdef CONFIG_ARCH_LH7A404
-#include "ohci-lh7a404.c"
-#define PLATFORM_DRIVER ohci_hcd_lh7a404_driver
-#endif
-#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
-#include "ohci-pxa27x.c"
-#define PLATFORM_DRIVER ohci_hcd_pxa27x_driver
-#endif
+int ohci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int port;
+ bool need_reinit = false;
-#ifdef CONFIG_ARCH_EP93XX
-#include "ohci-ep93xx.c"
-#define PLATFORM_DRIVER ohci_hcd_ep93xx_driver
-#endif
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-#ifdef CONFIG_SOC_AU1X00
-#include "ohci-au1xxx.c"
-#define PLATFORM_DRIVER ohci_hcd_au1xxx_driver
-#endif
+ /* Make sure resume from hibernation re-enumerates everything */
+ if (hibernated)
+ ohci_usb_reset(ohci);
-#ifdef CONFIG_PNX8550
-#include "ohci-pnx8550.c"
-#define PLATFORM_DRIVER ohci_hcd_pnx8550_driver
-#endif
+ /* See if the controller is already running or has been reset */
+ ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+ if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
+ need_reinit = true;
+ } else {
+ switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+ case OHCI_USB_OPER:
+ case OHCI_USB_RESET:
+ need_reinit = true;
+ }
+ }
-#ifdef CONFIG_USB_OHCI_HCD_PPC_SOC
-#include "ohci-ppc-soc.c"
-#define PLATFORM_DRIVER ohci_hcd_ppc_soc_driver
-#endif
+ /* If needed, reinitialize and suspend the root hub */
+ if (need_reinit) {
+ spin_lock_irq(&ohci->lock);
+ ohci_rh_resume(ohci);
+ ohci_rh_suspend(ohci, 0);
+ spin_unlock_irq(&ohci->lock);
+ }
+
+ /* Normally just turn on port power and enable interrupts */
+ else {
+ ohci_dbg(ohci, "powerup ports\n");
+ for (port = 0; port < ohci->num_ports; port++)
+ ohci_writel(ohci, RH_PS_PPS,
+ &ohci->regs->roothub.portstatus[port]);
+
+ ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrenable);
+ ohci_readl(ohci, &ohci->regs->intrenable);
+ msleep(20);
+ }
+
+ usb_hcd_resume_root_hub(hcd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ohci_resume);
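Both helpers are exported so that bus-glue system-sleep callbacks can simply forward to them. A minimal sketch, assuming a device whose drvdata is the usb_hcd (all names below are illustrative, not from this patch):

static int foo_ohci_pm_suspend(struct device *dev)
{
        struct usb_hcd *hcd = dev_get_drvdata(dev);

        return ohci_suspend(hcd, device_may_wakeup(dev));
}

static int foo_ohci_pm_resume(struct device *dev)
{
        struct usb_hcd *hcd = dev_get_drvdata(dev);

        /* false: this is not a resume from hibernation */
        return ohci_resume(hcd, false);
}

static SIMPLE_DEV_PM_OPS(foo_ohci_pm_ops, foo_ohci_pm_suspend, foo_ohci_pm_resume);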
-#ifdef CONFIG_ARCH_AT91
-#include "ohci-at91.c"
-#define PLATFORM_DRIVER ohci_hcd_at91_driver
#endif
-#ifdef CONFIG_ARCH_PNX4008
-#include "ohci-pnx4008.c"
-#define PLATFORM_DRIVER usb_hcd_pnx4008_driver
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Generic structure: This gets copied for platform drivers so that
+ * individual entries can be overridden as needed.
+ */
+
+static const struct hc_driver ohci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "OHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY | HCD_USB11,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_setup,
+ .start = ohci_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+void ohci_init_driver(struct hc_driver *drv,
+ const struct ohci_driver_overrides *over)
+{
+ /* Copy the generic table to drv and then apply the overrides */
+ *drv = ohci_hc_driver;
+
+ if (over) {
+ drv->product_desc = over->product_desc;
+ drv->hcd_priv_size += over->extra_priv_size;
+ if (over->reset)
+ drv->reset = over->reset;
+ }
+}
+EXPORT_SYMBOL_GPL(ohci_init_driver);
+
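A bus-glue module converted to this scheme would typically copy the generic table once at module init. The sketch below uses assumed names; the override fields match what ohci_init_driver() reads above (.extra_priv_size and .reset may be filled in the same way).

static const struct ohci_driver_overrides foo_overrides __initconst = {
        .product_desc   = "FOO OHCI",
};

static struct hc_driver __read_mostly foo_ohci_hc_driver;

static int __init foo_ohci_mod_init(void)
{
        ohci_init_driver(&foo_ohci_hc_driver, &foo_overrides);

        /* foo_ohci_driver: an assumed platform_driver defined elsewhere */
        return platform_driver_register(&foo_ohci_driver);
}
module_init(foo_ohci_mod_init);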
+/*-------------------------------------------------------------------------*/
+
+MODULE_AUTHOR (DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE ("GPL");
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_CPU_SUBTYPE_SH7763)
-#include "ohci-sh.c"
-#define PLATFORM_DRIVER ohci_hcd_sh_driver
+#if defined(CONFIG_ARCH_SA1100) && defined(CONFIG_SA1111)
+#include "ohci-sa1111.c"
+#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
+#ifdef CONFIG_USB_OHCI_HCD_DAVINCI
+#include "ohci-da8xx.c"
+#define DAVINCI_PLATFORM_DRIVER ohci_hcd_da8xx_driver
+#endif
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
#include "ohci-ppc-of.c"
@@ -1065,11 +1193,6 @@ MODULE_LICENSE ("GPL");
#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver
#endif
-#ifdef CONFIG_USB_OHCI_HCD_SSB
-#include "ohci-ssb.c"
-#define SSB_OHCI_DRIVER ssb_ohci_driver
-#endif
-
#ifdef CONFIG_MFD_SM501
#include "ohci-sm501.c"
#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver
@@ -1080,15 +1203,19 @@ MODULE_LICENSE ("GPL");
#define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver
#endif
-#if !defined(PCI_DRIVER) && \
- !defined(PLATFORM_DRIVER) && \
- !defined(OF_PLATFORM_DRIVER) && \
- !defined(SA1111_DRIVER) && \
- !defined(PS3_SYSTEM_BUS_DRIVER) && \
- !defined(SM501_OHCI_DRIVER) && \
- !defined(TMIO_OHCI_DRIVER) && \
- !defined(SSB_OHCI_DRIVER)
-#error "missing bus glue for ohci-hcd"
+#ifdef CONFIG_MACH_JZ4740
+#include "ohci-jz4740.c"
+#define PLATFORM_DRIVER ohci_hcd_jz4740_driver
+#endif
+
+#ifdef CONFIG_USB_OCTEON_OHCI
+#include "ohci-octeon.c"
+#define PLATFORM_DRIVER ohci_octeon_driver
+#endif
+
+#ifdef CONFIG_TILE_USB
+#include "ohci-tilegx.c"
+#define PLATFORM_DRIVER ohci_hcd_tilegx_driver
#endif
static int __init ohci_hcd_mod_init(void)
@@ -1103,13 +1230,11 @@ static int __init ohci_hcd_mod_init(void)
sizeof (struct ed), sizeof (struct td));
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
-#ifdef DEBUG
- ohci_debug_root = debugfs_create_dir("ohci", NULL);
+ ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
if (!ohci_debug_root) {
retval = -ENOENT;
goto error_debug;
}
-#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
retval = ps3_ohci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
@@ -1124,7 +1249,7 @@ static int __init ohci_hcd_mod_init(void)
#endif
#ifdef OF_PLATFORM_DRIVER
- retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
+ retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
goto error_of_platform;
#endif
@@ -1135,18 +1260,6 @@ static int __init ohci_hcd_mod_init(void)
goto error_sa1111;
#endif
-#ifdef PCI_DRIVER
- retval = pci_register_driver(&PCI_DRIVER);
- if (retval < 0)
- goto error_pci;
-#endif
-
-#ifdef SSB_OHCI_DRIVER
- retval = ssb_driver_register(&SSB_OHCI_DRIVER);
- if (retval)
- goto error_ssb;
-#endif
-
#ifdef SM501_OHCI_DRIVER
retval = platform_driver_register(&SM501_OHCI_DRIVER);
if (retval < 0)
@@ -1159,9 +1272,19 @@ static int __init ohci_hcd_mod_init(void)
goto error_tmio;
#endif
+#ifdef DAVINCI_PLATFORM_DRIVER
+ retval = platform_driver_register(&DAVINCI_PLATFORM_DRIVER);
+ if (retval < 0)
+ goto error_davinci;
+#endif
+
return retval;
/* Error path */
+#ifdef DAVINCI_PLATFORM_DRIVER
+ platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
+ error_davinci:
+#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
error_tmio:
@@ -1170,20 +1293,12 @@ static int __init ohci_hcd_mod_init(void)
platform_driver_unregister(&SM501_OHCI_DRIVER);
error_sm501:
#endif
-#ifdef SSB_OHCI_DRIVER
- ssb_driver_unregister(&SSB_OHCI_DRIVER);
- error_ssb:
-#endif
-#ifdef PCI_DRIVER
- pci_unregister_driver(&PCI_DRIVER);
- error_pci:
-#endif
#ifdef SA1111_DRIVER
sa1111_driver_unregister(&SA1111_DRIVER);
error_sa1111:
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
error_of_platform:
#endif
#ifdef PLATFORM_DRIVER
@@ -1194,11 +1309,9 @@ static int __init ohci_hcd_mod_init(void)
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
error_ps3:
#endif
-#ifdef DEBUG
debugfs_remove(ohci_debug_root);
ohci_debug_root = NULL;
error_debug:
-#endif
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
return retval;
@@ -1207,23 +1320,20 @@ module_init(ohci_hcd_mod_init);
static void __exit ohci_hcd_mod_exit(void)
{
+#ifdef DAVINCI_PLATFORM_DRIVER
+ platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
+#endif
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
#endif
#ifdef SM501_OHCI_DRIVER
platform_driver_unregister(&SM501_OHCI_DRIVER);
#endif
-#ifdef SSB_OHCI_DRIVER
- ssb_driver_unregister(&SSB_OHCI_DRIVER);
-#endif
-#ifdef PCI_DRIVER
- pci_unregister_driver(&PCI_DRIVER);
-#endif
#ifdef SA1111_DRIVER
sa1111_driver_unregister(&SA1111_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
- of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
+ platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
@@ -1231,9 +1341,7 @@ static void __exit ohci_hcd_mod_exit(void)
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
-#ifdef DEBUG
debugfs_remove(ohci_debug_root);
-#endif
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ohci_hcd_mod_exit);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 32bbce9718f..b4940de1eba 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -90,6 +90,24 @@ __acquires(ohci->lock)
dl_done_list (ohci);
finish_unlinks (ohci, ohci_frame_no(ohci));
+ /*
+ * Some controllers don't handle "global" suspend properly if
+ * there are unsuspended ports. For these controllers, put all
+ * the enabled ports into suspend before suspending the root hub.
+ */
+ if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
+ __hc32 __iomem *portstat = ohci->regs->roothub.portstatus;
+ int i;
+ unsigned temp;
+
+ for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
+ temp = ohci_readl(ohci, portstat);
+ if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
+ RH_PS_PES)
+ ohci_writel(ohci, RH_PS_PSS, portstat);
+ }
+ }
+
/* maybe resume can wake root hub */
if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
ohci->hc_control |= OHCI_CTRL_RWE;
@@ -111,6 +129,7 @@ __acquires(ohci->lock)
if (!autostop) {
ohci->next_statechange = jiffies + msecs_to_jiffies (5);
ohci->autostop = 0;
+ ohci->rh_state = OHCI_RH_SUSPENDED;
}
done:
@@ -140,7 +159,7 @@ __acquires(ohci->lock)
if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
/* this can happen after resuming a swsusp snapshot */
- if (hcd->state == HC_STATE_RESUMING) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
ohci_dbg (ohci, "BIOS/SMM active, control %03x\n",
ohci->hc_control);
status = -EBUSY;
@@ -175,7 +194,6 @@ __acquires(ohci->lock)
if (status == -EBUSY) {
if (!autostopped) {
spin_unlock_irq (&ohci->lock);
- (void) ohci_init (ohci);
status = ohci_restart (ohci);
usb_root_hub_lost_power(hcd->self.root_hub);
@@ -212,10 +230,11 @@ __acquires(ohci->lock)
/* Sometimes PCI D3 suspend trashes frame timings ... */
periodic_reinit (ohci);
- /* the following code is executed with ohci->lock held and
- * irqs disabled if and only if autostopped is true
+ /*
+ * The following code is executed with ohci->lock held and
+ * irqs disabled if and only if autostopped is true. This
+ * will cause sparse to warn about a "context imbalance".
*/
-
skip_resume:
/* interrupts might have been disabled */
ohci_writel (ohci, OHCI_INTR_INIT, &ohci->regs->intrenable);
@@ -274,6 +293,7 @@ skip_resume:
(void) ohci_readl (ohci, &ohci->regs->control);
}
+ ohci->rh_state = OHCI_RH_RUNNING;
return 0;
}
@@ -284,7 +304,7 @@ static int ohci_bus_suspend (struct usb_hcd *hcd)
spin_lock_irq (&ohci->lock);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)))
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
rc = -ESHUTDOWN;
else
rc = ohci_rh_suspend (ohci, 0);
@@ -302,7 +322,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
spin_lock_irq (&ohci->lock);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)))
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
rc = -ESHUTDOWN;
else
rc = ohci_rh_resume (ohci);
@@ -314,49 +334,6 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
return rc;
}
-/* Carry out the final steps of resuming the controller device */
-static void ohci_finish_controller_resume(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int port;
- bool need_reinit = false;
-
- /* See if the controller is already running or has been reset */
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
- if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
- need_reinit = true;
- } else {
- switch (ohci->hc_control & OHCI_CTRL_HCFS) {
- case OHCI_USB_OPER:
- case OHCI_USB_RESET:
- need_reinit = true;
- }
- }
-
- /* If needed, reinitialize and suspend the root hub */
- if (need_reinit) {
- spin_lock_irq(&ohci->lock);
- hcd->state = HC_STATE_RESUMING;
- ohci_rh_resume(ohci);
- hcd->state = HC_STATE_QUIESCING;
- ohci_rh_suspend(ohci, 0);
- hcd->state = HC_STATE_SUSPENDED;
- spin_unlock_irq(&ohci->lock);
- }
-
- /* Normally just turn on port power and enable interrupts */
- else {
- ohci_dbg(ohci, "powerup ports\n");
- for (port = 0; port < ohci->num_ports; port++)
- ohci_writel(ohci, RH_PS_PPS,
- &ohci->regs->roothub.portstatus[port]);
-
- ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrenable);
- ohci_readl(ohci, &ohci->regs->intrenable);
- msleep(20);
- }
-}
-
/* Carry out polling-, autostop-, and autoresume-related state changes */
static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected, int rhsc_status)
@@ -364,7 +341,7 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int poll_rh = 1;
int rhsc_enable;
- /* Some broken controllers never turn off RHCS in the interrupt
+ /* Some broken controllers never turn off RHSC in the interrupt
* status register. For their sake we won't re-enable RHSC
* interrupts if the interrupt bit is already active.
*/
@@ -479,8 +456,7 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
/* build "status change" packet (one or two bytes) from HC registers */
-static int
-ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
+int ohci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int i, changed = 0, length = 1;
@@ -489,7 +465,7 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
unsigned long flags;
spin_lock_irqsave (&ohci->lock, flags);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
+ if (!HCD_HW_ACCESSIBLE(hcd))
goto done;
/* undocumented erratum seen on at least rev D */
@@ -533,14 +509,19 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
}
}
- hcd->poll_rh = ohci_root_hub_state_changes(ohci, changed,
- any_connected, rhsc_status);
+ if (ohci_root_hub_state_changes(ohci, changed,
+ any_connected, rhsc_status))
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ else
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
done:
spin_unlock_irqrestore (&ohci->lock, flags);
return changed ? length : 0;
}
+EXPORT_SYMBOL_GPL(ohci_hub_status_data);
/*-------------------------------------------------------------------------*/
@@ -569,17 +550,18 @@ ohci_hub_descriptor (
temp |= 0x0010;
else if (rh & RH_A_OCPM) /* per-port overcurrent reporting? */
temp |= 0x0008;
- desc->wHubCharacteristics = (__force __u16)cpu_to_hc16(ohci, temp);
+ desc->wHubCharacteristics = cpu_to_le16(temp);
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
rh = roothub_b (ohci);
- memset(desc->bitmap, 0xff, sizeof(desc->bitmap));
- desc->bitmap [0] = rh & RH_B_DR;
+ memset(desc->u.hs.DeviceRemovable, 0xff,
+ sizeof(desc->u.hs.DeviceRemovable));
+ desc->u.hs.DeviceRemovable[0] = rh & RH_B_DR;
if (ohci->num_ports > 7) {
- desc->bitmap [1] = (rh & RH_B_DR) >> 8;
- desc->bitmap [2] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = (rh & RH_B_DR) >> 8;
+ desc->u.hs.DeviceRemovable[2] = 0xff;
} else
- desc->bitmap [1] = 0xff;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
}
/*-------------------------------------------------------------------------*/
@@ -616,14 +598,8 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
/* See usb 7.1.7.5: root hubs must issue at least 50 msec reset signaling,
* not necessarily continuous ... to guard against resume signaling.
- * The short timeout is safe for non-root hubs, and is backward-compatible
- * with earlier Linux hosts.
*/
-#ifdef CONFIG_USB_SUSPEND
#define PORT_RESET_MSEC 50
-#else
-#define PORT_RESET_MSEC 10
-#endif
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
@@ -688,7 +664,7 @@ static inline int root_port_reset (struct ohci_hcd *ohci, unsigned port)
return 0;
}
-static int ohci_hub_control (
+int ohci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
@@ -697,11 +673,11 @@ static int ohci_hub_control (
u16 wLength
) {
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ports = hcd_to_bus (hcd)->root_hub->maxchild;
+ int ports = ohci->num_ports;
u32 temp;
int retval = 0;
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)))
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
return -ESHUTDOWN;
switch (typeReq) {
@@ -767,10 +743,8 @@ static int ohci_hub_control (
temp = roothub_portstatus (ohci, wIndex);
put_unaligned_le32(temp, buf);
-#ifndef OHCI_VERBOSE_DEBUG
- if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
-#endif
- dbg_port (ohci, "GetStatus", wIndex, temp);
+ if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
+ dbg_port(ohci, "GetStatus", wIndex, temp);
break;
case SetHubFeature:
switch (wValue) {
@@ -816,4 +790,4 @@ error:
}
return retval;
}
-
+EXPORT_SYMBOL_GPL(ohci_hub_control);
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
new file mode 100644
index 00000000000..c2c221a332e
--- /dev/null
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+struct jz4740_ohci_hcd {
+ struct ohci_hcd ohci_hcd;
+
+ struct regulator *vbus;
+ bool vbus_enabled;
+ struct clk *clk;
+};
+
+static inline struct jz4740_ohci_hcd *hcd_to_jz4740_hcd(struct usb_hcd *hcd)
+{
+ return (struct jz4740_ohci_hcd *)(hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *jz4740_hcd_to_hcd(struct jz4740_ohci_hcd *jz4740_ohci)
+{
+ return container_of((void *)jz4740_ohci, struct usb_hcd, hcd_priv);
+}
+
+static int ohci_jz4740_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ret = ohci_init(ohci);
+ if (ret < 0)
+ return ret;
+
+ ohci->num_ports = 1;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ dev_err(hcd->self.controller, "Can not start %s",
+ hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+ return 0;
+}
+
+static int ohci_jz4740_set_vbus_power(struct jz4740_ohci_hcd *jz4740_ohci,
+ bool enabled)
+{
+ int ret = 0;
+
+ if (!jz4740_ohci->vbus)
+ return 0;
+
+ if (enabled && !jz4740_ohci->vbus_enabled) {
+ ret = regulator_enable(jz4740_ohci->vbus);
+ if (ret)
+ dev_err(jz4740_hcd_to_hcd(jz4740_ohci)->self.controller,
+ "Could not power vbus\n");
+ } else if (!enabled && jz4740_ohci->vbus_enabled) {
+ ret = regulator_disable(jz4740_ohci->vbus);
+ }
+
+ if (ret == 0)
+ jz4740_ohci->vbus_enabled = enabled;
+
+ return ret;
+}
+
+static int ohci_jz4740_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
+ int ret = 0;
+
+ switch (typeReq) {
+ case SetPortFeature:
+ if (wValue == USB_PORT_FEAT_POWER)
+ ret = ohci_jz4740_set_vbus_power(jz4740_ohci, true);
+ break;
+ case ClearPortFeature:
+ if (wValue == USB_PORT_FEAT_POWER)
+ ret = ohci_jz4740_set_vbus_power(jz4740_ohci, false);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+}
+
+
+static const struct hc_driver ohci_jz4740_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "JZ4740 OHCI",
+ .hcd_priv_size = sizeof(struct jz4740_ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .start = ohci_jz4740_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_jz4740_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+
+static int jz4740_ohci_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct usb_hcd *hcd;
+ struct jz4740_ohci_hcd *jz4740_ohci;
+ struct resource *res;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENOENT;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get platform irq\n");
+ return irq;
+ }
+
+ hcd = usb_create_hcd(&ohci_jz4740_hc_driver, &pdev->dev, "jz4740");
+ if (!hcd) {
+ dev_err(&pdev->dev, "Failed to create hcd.\n");
+ return -ENOMEM;
+ }
+
+ jz4740_ohci = hcd_to_jz4740_hcd(hcd);
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto err_free;
+ }
+
+ jz4740_ohci->clk = devm_clk_get(&pdev->dev, "uhc");
+ if (IS_ERR(jz4740_ohci->clk)) {
+ ret = PTR_ERR(jz4740_ohci->clk);
+ dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
+ goto err_free;
+ }
+
+ jz4740_ohci->vbus = devm_regulator_get(&pdev->dev, "vbus");
+ if (IS_ERR(jz4740_ohci->vbus))
+ jz4740_ohci->vbus = NULL;
+
+
+ clk_set_rate(jz4740_ohci->clk, 48000000);
+ clk_enable(jz4740_ohci->clk);
+ if (jz4740_ohci->vbus)
+ ohci_jz4740_set_vbus_power(jz4740_ohci, true);
+
+ platform_set_drvdata(pdev, hcd);
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ ret = usb_add_hcd(hcd, irq, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add hcd: %d\n", ret);
+ goto err_disable;
+ }
+ device_wakeup_enable(hcd->self.controller);
+
+ return 0;
+
+err_disable:
+ if (jz4740_ohci->vbus)
+ regulator_disable(jz4740_ohci->vbus);
+ clk_disable(jz4740_ohci->clk);
+
+err_free:
+ usb_put_hcd(hcd);
+
+ return ret;
+}
+
+static int jz4740_ohci_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
+
+ usb_remove_hcd(hcd);
+
+ if (jz4740_ohci->vbus)
+ regulator_disable(jz4740_ohci->vbus);
+
+ clk_disable(jz4740_ohci->clk);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct platform_driver ohci_hcd_jz4740_driver = {
+ .probe = jz4740_ohci_probe,
+ .remove = jz4740_ohci_remove,
+ .driver = {
+ .name = "jz4740-ohci",
+ .owner = THIS_MODULE,
+ },
+};
+
+MODULE_ALIAS("platform:jz4740-ohci");
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
deleted file mode 100644