Diffstat (limited to 'drivers/usb/musb')
-rw-r--r--  drivers/usb/musb/Kconfig            |  235
-rw-r--r--  drivers/usb/musb/Makefile           |   83
-rw-r--r--  drivers/usb/musb/am35x.c            |  623
-rw-r--r--  drivers/usb/musb/blackfin.c         |  501
-rw-r--r--  drivers/usb/musb/blackfin.h         |   39
-rw-r--r--  drivers/usb/musb/cppi_dma.c         |  215
-rw-r--r--  drivers/usb/musb/cppi_dma.h         |    7
-rw-r--r--  drivers/usb/musb/da8xx.c            |  590
-rw-r--r--  drivers/usb/musb/davinci.c          |  366
-rw-r--r--  drivers/usb/musb/davinci.h          |   23
-rw-r--r--  drivers/usb/musb/jz4740.c           |  201
-rw-r--r--  drivers/usb/musb/musb_am335x.c      |   43
-rw-r--r--  drivers/usb/musb/musb_core.c        | 1770
-rw-r--r--  drivers/usb/musb/musb_core.h        |  359
-rw-r--r--  drivers/usb/musb/musb_cppi41.c      |  744
-rw-r--r--  drivers/usb/musb/musb_debug.h       |   24
-rw-r--r--  drivers/usb/musb/musb_debugfs.c     |  276
-rw-r--r--  drivers/usb/musb/musb_dma.h         |   38
-rw-r--r--  drivers/usb/musb/musb_dsps.c        |  830
-rw-r--r--  drivers/usb/musb/musb_gadget.c      | 1419
-rw-r--r--  drivers/usb/musb/musb_gadget.h      |   59
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c  |  232
-rw-r--r--  drivers/usb/musb/musb_host.c        | 1511
-rw-r--r--  drivers/usb/musb/musb_host.h        |   70
-rw-r--r--  drivers/usb/musb/musb_io.h          |   23
-rw-r--r--  drivers/usb/musb/musb_regs.h        |  162
-rw-r--r--  drivers/usb/musb/musb_virthub.c     |  196
-rw-r--r--  drivers/usb/musb/musbhsdma.c        |  177
-rw-r--r--  drivers/usb/musb/musbhsdma.h        |   30
-rw-r--r--  drivers/usb/musb/omap2430.c         |  683
-rw-r--r--  drivers/usb/musb/omap2430.h         |   34
-rw-r--r--  drivers/usb/musb/tusb6010.c         |  387
-rw-r--r--  drivers/usb/musb/tusb6010.h         |    9
-rw-r--r--  drivers/usb/musb/tusb6010_omap.c    |   98
-rw-r--r--  drivers/usb/musb/ux500.c            |  399
-rw-r--r--  drivers/usb/musb/ux500_dma.c        |  412
36 files changed, 9490 insertions(+), 3378 deletions(-)
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 9985db08e7d..06cc5d6ea68 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -3,16 +3,10 @@
# for silicon based on Mentor Graphics INVENTRA designs
#
-comment "Enable Host or Gadget support to see Inventra options"
- depends on !USB && USB_GADGET=n
-
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
- depends on (USB || USB_GADGET) && HAVE_CLK
- depends on !SUPERH
- select TWL4030_USB if MACH_OMAP_3430SDP
- select USB_OTG_UTILS
tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+ depends on (USB || USB_GADGET)
help
Say Y here if your system has a dual role high speed USB
controller based on the Mentor Graphics silicon IP. Then
@@ -20,8 +14,8 @@ config USB_MUSB_HDRC
it's being used with, including the USB peripheral role,
or the USB host role, or both.
- Texas Instruments parts using this IP include DaVinci 644x,
- OMAP 243x, OMAP 343x, and TUSB 6010.
+ Texas Instruments families using this IP include DaVinci
+ (35x, 644x ...), OMAP 243x, OMAP 3, and TUSB 6010.
Analog Devices parts using this IP include Blackfin BF54x,
BF525 and BF527.
@@ -29,153 +23,144 @@ config USB_MUSB_HDRC
If you do not know what this is, please say N.
To compile this driver as a module, choose M here; the
- module will be called "musb_hdrc".
-
-config USB_MUSB_SOC
- boolean
- depends on USB_MUSB_HDRC
- default y if ARCH_DAVINCI
- default y if ARCH_OMAP2430
- default y if ARCH_OMAP34XX
- default y if (BF54x && !BF544)
- default y if (BF52x && !BF522 && !BF523)
+ module will be called "musb-hdrc".
-comment "DaVinci 644x USB support"
- depends on USB_MUSB_HDRC && ARCH_DAVINCI
+if USB_MUSB_HDRC
-comment "OMAP 243x high speed USB support"
- depends on USB_MUSB_HDRC && ARCH_OMAP2430
+choice
+ bool "MUSB Mode Selection"
+ default USB_MUSB_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_MUSB_HOST if (USB && !USB_GADGET)
+ default USB_MUSB_GADGET if (!USB && USB_GADGET)
-comment "OMAP 343x high speed USB support"
- depends on USB_MUSB_HDRC && ARCH_OMAP34XX
+config USB_MUSB_HOST
+ bool "Host only mode"
+ depends on USB=y || USB=USB_MUSB_HDRC
+ help
+ Select this when you want to use MUSB in host mode only,
+ thereby the gadget feature will be regressed.
-comment "Blackfin high speed USB Support"
- depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523))
+config USB_MUSB_GADGET
+ bool "Gadget only mode"
+ depends on USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC
+ depends on HAS_DMA
+ help
+ Select this when you want to use MUSB in gadget mode only,
+ thereby the host feature will be regressed.
-config USB_TUSB6010
- boolean "TUSB 6010 support"
- depends on USB_MUSB_HDRC && !USB_MUSB_SOC
- default y
+config USB_MUSB_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on ((USB=y || USB=USB_MUSB_HDRC) && (USB_GADGET=y || USB_GADGET=USB_MUSB_HDRC))
+ depends on HAS_DMA
help
- The TUSB 6010 chip, from Texas Instruments, connects a discrete
- HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
- (a high speed serial link). It can use system-specific external
- DMA controllers.
+ This is the default mode of working of MUSB controller where
+ both host and gadget features are enabled.
+
+endchoice
choice
- prompt "Driver Mode"
- depends on USB_MUSB_HDRC
- help
- Dual-Role devices can support both host and peripheral roles,
- as well as a the special "OTG Device" role which can switch
- between both roles as needed.
+ prompt "Platform Glue Layer"
-# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
-# OTG needs both roles, not just USB_MUSB_HOST.
-config USB_MUSB_HOST
- depends on USB
- bool "USB Host"
- help
- Say Y here if your system supports the USB host role.
- If it has a USB "A" (rectangular), "Mini-A" (uncommon),
- or "Mini-AB" connector, it supports the host role.
- (With a "Mini-AB" connector, you should enable USB OTG.)
-
-# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
-# side support ... OTG needs both roles
-config USB_MUSB_PERIPHERAL
- depends on USB_GADGET
- bool "USB Peripheral (gadget stack)"
- select USB_GADGET_MUSB_HDRC
- help
- Say Y here if your system supports the USB peripheral role.
- If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
- connector, it supports the peripheral role.
- (With a "Mini-AB" connector, you should enable USB OTG.)
-
-config USB_MUSB_OTG
- depends on USB && USB_GADGET && PM && EXPERIMENTAL
- bool "Both host and peripheral: USB OTG (On The Go) Device"
- select USB_GADGET_MUSB_HDRC
- select USB_OTG
- help
- The most notable feature of USB OTG is support for a
- "Dual-Role" device, which can act as either a device
- or a host. The initial role choice can be changed
- later, when two dual-role devices talk to each other.
+config USB_MUSB_DAVINCI
+ tristate "DaVinci"
+ depends on ARCH_DAVINCI_DMx
+ depends on BROKEN
- At this writing, the OTG support in this driver is incomplete,
- omitting the mandatory HNP or SRP protocols. However, some
- of the cable based role switching works. (That is, grounding
- the ID pin switches the controller to host mode, while leaving
- it floating leaves it in peripheral mode.)
+config USB_MUSB_DA8XX
+ tristate "DA8xx/OMAP-L1x"
+ depends on ARCH_DAVINCI_DA8XX
+ depends on BROKEN
- Select this if your system has a Mini-AB connector, or
- to simplify certain kinds of configuration.
+config USB_MUSB_TUSB6010
+ tristate "TUSB6010"
- To implement your OTG Targeted Peripherals List (TPL), enable
- USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
- to match your requirements.
+config USB_MUSB_OMAP2PLUS
+ tristate "OMAP2430 and onwards"
+ depends on ARCH_OMAP2PLUS && USB
+ select GENERIC_PHY
-endchoice
+config USB_MUSB_AM35X
+ tristate "AM35x"
+ depends on ARCH_OMAP
-# enable peripheral support (including with OTG)
-config USB_GADGET_MUSB_HDRC
- bool
- depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
-# default y
-# select USB_GADGET_DUALSPEED
-# select USB_GADGET_SELECTED
+config USB_MUSB_DSPS
+ tristate "TI DSPS platforms"
+ select USB_MUSB_AM335X_CHILD
+ depends on OF_IRQ
-# enables host support (including with OTG)
-config USB_MUSB_HDRC_HCD
- bool
- depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
- select USB_OTG if USB_GADGET_MUSB_HDRC
- default y
+config USB_MUSB_BLACKFIN
+ tristate "Blackfin"
+ depends on (BF54x && !BF544) || (BF52x && ! BF522 && !BF523)
+config USB_MUSB_UX500
+ tristate "Ux500 platforms"
-config MUSB_PIO_ONLY
- bool 'Disable DMA (always use PIO)'
- depends on USB_MUSB_HDRC
- default y if USB_TUSB6010
+config USB_MUSB_JZ4740
+ tristate "JZ4740"
+ depends on MACH_JZ4740 || COMPILE_TEST
+ depends on USB_MUSB_GADGET
+ depends on USB_OTG_BLACKLIST_HUB
+
+endchoice
+
+config USB_MUSB_AM335X_CHILD
+ tristate
+
+choice
+ prompt 'MUSB DMA mode'
+ default MUSB_PIO_ONLY if ARCH_MULTIPLATFORM || USB_MUSB_JZ4740
+ default USB_UX500_DMA if USB_MUSB_UX500
+ default USB_INVENTRA_DMA if USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
+ default USB_TI_CPPI_DMA if USB_MUSB_DAVINCI
+ default USB_TUSB_OMAP_DMA if USB_MUSB_TUSB6010
+ default MUSB_PIO_ONLY if USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X \
+ || USB_MUSB_DSPS
help
- All data is copied between memory and FIFO by the CPU.
- DMA controllers are ignored.
+ Unfortunately, only one option can be enabled here. Ideally one
+ should be able to build all these drivers into one kernel to
+ allow using DMA on multiplatform kernels.
- Do not select 'n' here unless DMA support for your SOC or board
- is unavailable (or unstable). When DMA is enabled at compile time,
- you can still disable it at run time using the "use_dma=n" module
- parameter.
+config USB_UX500_DMA
+ bool 'ST Ericsson Ux500'
+ depends on USB_MUSB_UX500
+ help
+ Enable DMA transfers on UX500 platforms.
config USB_INVENTRA_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default ARCH_OMAP2430 || ARCH_OMAP34XX || BLACKFIN
+ bool 'Inventra'
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
help
Enable DMA transfers using Mentor's engine.
config USB_TI_CPPI_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default ARCH_DAVINCI
+ bool 'TI CPPI (Davinci)'
+ depends on USB_MUSB_DAVINCI
help
Enable DMA transfers when TI CPPI DMA is available.
+config USB_TI_CPPI41_DMA
+ bool 'TI CPPI 4.1 (AM335x)'
+ depends on ARCH_OMAP
+ select TI_CPPI41
+
config USB_TUSB_OMAP_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- depends on USB_TUSB6010
+ bool 'TUSB 6010'
+ depends on USB_MUSB_TUSB6010 = USB_MUSB_HDRC # both built-in or both modules
depends on ARCH_OMAP
- default y
help
Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
-config USB_MUSB_DEBUG
- depends on USB_MUSB_HDRC
- bool "Enable debugging messages"
- default n
+config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
help
- This enables musb debugging. To set the logging level use the debug
- module parameter. Starting at level 3, per-transfer (urb, usb_request,
- packet, or dma transfer) tracing may kick in.
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+ Do not choose this unless DMA support for your SOC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+endchoice
+
+endif # USB_MUSB_HDRC
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 85710ccc188..ba495018b41 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -2,76 +2,35 @@
# for USB OTG silicon based on Mentor Graphics INVENTRA designs
#
-musb_hdrc-objs := musb_core.o
+obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
-obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
+musb_hdrc-y := musb_core.o
-ifeq ($(CONFIG_ARCH_DAVINCI),y)
- musb_hdrc-objs += davinci.o
-endif
+musb_hdrc-$(CONFIG_USB_MUSB_HOST)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_virthub.o musb_host.o
+musb_hdrc-$(CONFIG_USB_MUSB_GADGET)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_gadget_ep0.o musb_gadget.o
+musb_hdrc-$(CONFIG_DEBUG_FS) += musb_debugfs.o
-ifeq ($(CONFIG_USB_TUSB6010),y)
- musb_hdrc-objs += tusb6010.o
-endif
+# Hardware Glue Layer
+obj-$(CONFIG_USB_MUSB_OMAP2PLUS) += omap2430.o
+obj-$(CONFIG_USB_MUSB_AM35X) += am35x.o
+obj-$(CONFIG_USB_MUSB_DSPS) += musb_dsps.o
+obj-$(CONFIG_USB_MUSB_TUSB6010) += tusb6010.o
+obj-$(CONFIG_USB_MUSB_DAVINCI) += davinci.o
+obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
+obj-$(CONFIG_USB_MUSB_BLACKFIN) += blackfin.o
+obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
+obj-$(CONFIG_USB_MUSB_JZ4740) += jz4740.o
-ifeq ($(CONFIG_ARCH_OMAP2430),y)
- musb_hdrc-objs += omap2430.o
-endif
-ifeq ($(CONFIG_ARCH_OMAP3430),y)
- musb_hdrc-objs += omap2430.o
-endif
-
-ifeq ($(CONFIG_BF54x),y)
- musb_hdrc-objs += blackfin.o
-endif
-
-ifeq ($(CONFIG_BF52x),y)
- musb_hdrc-objs += blackfin.o
-endif
-
-ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
- musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o
-endif
-
-ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
- musb_hdrc-objs += musb_virthub.o musb_host.o
-endif
+obj-$(CONFIG_USB_MUSB_AM335X_CHILD) += musb_am335x.o
# the kconfig must guarantee that only one of the
# possible I/O schemes will be enabled at a time ...
# PIO only, or DMA (several potential schemes).
# though PIO is always there to back up DMA, and for ep0
-ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
-
- ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
- musb_hdrc-objs += musbhsdma.o
-
- else
- ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
- musb_hdrc-objs += cppi_dma.o
-
- else
- ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
- musb_hdrc-objs += tusb6010_omap.o
-
- endif
- endif
- endif
-endif
-
-
-################################################################################
-
-# FIXME remove all these extra "-DMUSB_* things, stick to CONFIG_*
-
-ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
- EXTRA_CFLAGS += -DMUSB_AHB_ID
-endif
-
-# Debugging
-
-ifeq ($(CONFIG_USB_MUSB_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
+musb_hdrc-$(CONFIG_USB_INVENTRA_DMA) += musbhsdma.o
+musb_hdrc-$(CONFIG_USB_TI_CPPI_DMA) += cppi_dma.o
+musb_hdrc-$(CONFIG_USB_TUSB_OMAP_DMA) += tusb6010_omap.o
+musb_hdrc-$(CONFIG_USB_UX500_DMA) += ux500_dma.o
+musb_hdrc-$(CONFIG_USB_TI_CPPI41_DMA) += musb_cppi41.o
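
The new Makefile leans on a kbuild idiom worth noting: in a line such as "musb_hdrc-$(CONFIG_USB_MUSB_HOST)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_virthub.o musb_host.o" the two config values are simply concatenated. Because the mode options form a mutually exclusive bool choice, the left-hand side expands to either "musb_hdrc-y" (one of them is enabled) or the ignored "musb_hdrc-" (neither is). A minimal sketch of the same trick, using hypothetical CONFIG_FOO/CONFIG_BAR options rather than anything from this patch:

# Illustrative kbuild fragment only -- FOO and BAR are assumed to be
# mutually exclusive bool options, like the MUSB mode choice above.
obj-m   += demo.o
demo-y  := core.o
# Expands to "demo-y += extra.o" when either FOO=y or BAR=y,
# and to the ignored "demo- += extra.o" when both are unset.
demo-$(CONFIG_FOO)$(CONFIG_BAR) += extra.o
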
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
new file mode 100644
index 00000000000..0a34dd85955
--- /dev/null
+++ b/drivers/usb/musb/am35x.c
@@ -0,0 +1,623 @@
+/*
+ * Texas Instruments AM35x "glue layer"
+ *
+ * Copyright (c) 2010, by Texas Instruments
+ *
+ * Based on the DA8xx "glue layer" code.
+ * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/usb_phy_generic.h>
+#include <linux/platform_data/usb-omap.h>
+
+#include "musb_core.h"
+
+/*
+ * AM35x specific definitions
+ */
+/* USB 2.0 OTG module registers */
+#define USB_REVISION_REG 0x00
+#define USB_CTRL_REG 0x04
+#define USB_STAT_REG 0x08
+#define USB_EMULATION_REG 0x0c
+/* 0x10 Reserved */
+#define USB_AUTOREQ_REG 0x14
+#define USB_SRP_FIX_TIME_REG 0x18
+#define USB_TEARDOWN_REG 0x1c
+#define EP_INTR_SRC_REG 0x20
+#define EP_INTR_SRC_SET_REG 0x24
+#define EP_INTR_SRC_CLEAR_REG 0x28
+#define EP_INTR_MASK_REG 0x2c
+#define EP_INTR_MASK_SET_REG 0x30
+#define EP_INTR_MASK_CLEAR_REG 0x34
+#define EP_INTR_SRC_MASKED_REG 0x38
+#define CORE_INTR_SRC_REG 0x40
+#define CORE_INTR_SRC_SET_REG 0x44
+#define CORE_INTR_SRC_CLEAR_REG 0x48
+#define CORE_INTR_MASK_REG 0x4c
+#define CORE_INTR_MASK_SET_REG 0x50
+#define CORE_INTR_MASK_CLEAR_REG 0x54
+#define CORE_INTR_SRC_MASKED_REG 0x58
+/* 0x5c Reserved */
+#define USB_END_OF_INTR_REG 0x60
+
+/* Control register bits */
+#define AM35X_SOFT_RESET_MASK 1
+
+/* USB interrupt register bits */
+#define AM35X_INTR_USB_SHIFT 16
+#define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT)
+#define AM35X_INTR_DRVVBUS 0x100
+#define AM35X_INTR_RX_SHIFT 16
+#define AM35X_INTR_TX_SHIFT 0
+#define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */
+#define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */
+#define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
+#define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)
+
+#define USB_MENTOR_CORE_OFFSET 0x400
+
+struct am35x_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct platform_device *phy;
+ struct clk *phy_clk;
+ struct clk *clk;
+};
+
+/*
+ * am35x_musb_enable - enable interrupts
+ */
+static void am35x_musb_enable(struct musb *musb)
+{
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 epmask;
+
+ /* Workaround: setup IRQs through both register sets. */
+ epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) |
+ ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT);
+
+ musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask);
+ musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK);
+
+ /* Force the DRVVBUS IRQ so we can start polling for ID change. */
+ musb_writel(reg_base, CORE_INTR_SRC_SET_REG,
+ AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
+}
+
+/*
+ * am35x_musb_disable - disable HDRC and flush interrupts
+ */
+static void am35x_musb_disable(struct musb *musb)
+{
+ void __iomem *reg_base = musb->ctrl_base;
+
+ musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK);
+ musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG,
+ AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
+}
+
+#define portstate(stmt) stmt
+
+static void am35x_musb_set_vbus(struct musb *musb, int is_on)
+{
+ WARN_ON(is_on && is_peripheral_active(musb));
+}
+
+#define POLL_SECONDS 2
+
+static struct timer_list otg_workaround;
+
+static void otg_timer(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+ /*
+ * We poll because AM35x's won't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->state));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
+ MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
+ break;
+ case OTG_STATE_B_IDLE:
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || (musb->a_wait_bcon == 0 &&
+ musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->state));
+ del_timer(&otg_workaround);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
+ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
+ return;
+ }
+ last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
+ usb_otg_state_string(musb->xceiv->state),
+ jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&otg_workaround, timeout);
+}
+
+static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
+{
+ struct musb *musb = hci;
+ void __iomem *reg_base = musb->ctrl_base;
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+ struct usb_otg *otg = musb->xceiv->otg;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ u32 epintr, usbintr;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* Get endpoint interrupts */
+ epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);
+
+ if (epintr) {
+ musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr);
+
+ musb->int_rx =
+ (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT;
+ musb->int_tx =
+ (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT;
+ }
+
+ /* Get usb core interrupts */
+ usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG);
+ if (!usbintr && !epintr)
+ goto eoi;
+
+ if (usbintr) {
+ musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr);
+
+ musb->int_usb =
+ (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT;
+ }
+ /*
+ * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
+ * AM35x's missing ID change IRQ. We need an ID change IRQ to
+ * switch appropriately between halves of the OTG state machine.
+ * Managing DEVCTL.SESSION per Mentor docs requires that we know its
+ * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
+ * Also, DRVVBUS pulses for SRP (but not at 5V) ...
+ */
+ if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) {
+ int drvvbus = musb_readl(reg_base, USB_STAT_REG);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+ int err;
+
+ err = musb->int_usb & MUSB_INTR_VBUSERROR;
+ if (err) {
+ /*
+ * The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"-starting sessions
+ * without waiting for VBUS to stop registering in
+ * devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (drvvbus) {
+ MUSB_HST_MODE(musb);
+ otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ del_timer(&otg_workaround);
+ } else {
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ otg->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+ }
+
+ /* NOTE: this must complete power-on within 100 ms. */
+ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ usb_otg_state_string(musb->xceiv->state),
+ err ? " ERROR" : "",
+ devctl);
+ ret = IRQ_HANDLED;
+ }
+
+ /* Drop spurious RX and TX if device is disconnected */
+ if (musb->int_usb & MUSB_INTR_DISCONNECT) {
+ musb->int_tx = 0;
+ musb->int_rx = 0;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ ret |= musb_interrupt(musb);
+
+eoi:
+ /* EOI needs to be written for the IRQ to be re-asserted. */
+ if (ret == IRQ_HANDLED || epintr || usbintr) {
+ /* clear level interrupt */
+ if (data->clear_irq)
+ data->clear_irq();
+ /* write EOI */
+ musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
+ }
+
+ /* Poll for ID change */
+ if (musb->xceiv->state == OTG_STATE_B_IDLE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
+{
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+ int retval = 0;
+
+ if (data->set_mode)
+ data->set_mode(musb_mode);
+ else
+ retval = -EIO;
+
+ return retval;
+}
+
+static int am35x_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 rev;
+
+ musb->mregs += USB_MENTOR_CORE_OFFSET;
+
+ /* Returns zero if e.g. not clocked */
+ rev = musb_readl(reg_base, USB_REVISION_REG);
+ if (!rev)
+ return -ENODEV;
+
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv))
+ return -EPROBE_DEFER;
+
+ setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+
+ /* Reset the musb */
+ if (data->reset)
+ data->reset();
+
+ /* Reset the controller */
+ musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);
+
+ /* Start the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+ data->set_phy_power(1);
+
+ msleep(5);
+
+ musb->isr = am35x_musb_interrupt;
+
+ /* clear level interrupt */
+ if (data->clear_irq)
+ data->clear_irq();
+
+ return 0;
+}
+
+static int am35x_musb_exit(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+
+ del_timer_sync(&otg_workaround);
+
+ /* Shutdown the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+ data->set_phy_power(0);
+
+ usb_put_phy(musb->xceiv);
+
+ return 0;
+}
+
+/* AM35x supports only 32bit read operation */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ void __iomem *fifo = hw_ep->fifo;
+ u32 val;
+ int i;
+
+ /* Read for 32bit-aligned destination address */
+ if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
+ readsl(fifo, dst, len >> 2);
+ dst += len & ~0x03;
+ len &= 0x03;
+ }
+ /*
+ * Now read the remaining 1 to 3 bytes, or the complete length
+ * if the address is unaligned.
+ */
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ *(u32 *) dst = musb_readl(fifo, 0);
+ dst += 4;
+ }
+ len &= 0x03;
+ }
+ if (len > 0) {
+ val = musb_readl(fifo, 0);
+ memcpy(dst, &val, len);
+ }
+}
+
+static const struct musb_platform_ops am35x_ops = {
+ .init = am35x_musb_init,
+ .exit = am35x_musb_exit,
+
+ .enable = am35x_musb_enable,
+ .disable = am35x_musb_disable,
+
+ .set_mode = am35x_musb_set_mode,
+ .try_idle = am35x_musb_try_idle,
+
+ .set_vbus = am35x_musb_set_vbus,
+};
+
+static const struct platform_device_info am35x_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int am35x_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct platform_device *musb;
+ struct am35x_glue *glue;
+ struct platform_device_info pinfo;
+ struct clk *phy_clk;
+ struct clk *clk;
+
+ int ret = -ENOMEM;
+
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&pdev->dev, "failed to allocate glue context\n");
+ goto err0;
+ }
+
+ phy_clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(phy_clk)) {
+ dev_err(&pdev->dev, "failed to get PHY clock\n");
+ ret = PTR_ERR(phy_clk);
+ goto err3;
+ }
+
+ clk = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err4;
+ }
+
+ ret = clk_enable(phy_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PHY clock\n");
+ goto err5;
+ }
+
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err6;
+ }
+
+ glue->dev = &pdev->dev;
+ glue->phy_clk = phy_clk;
+ glue->clk = clk;
+
+ pdata->platform_ops = &am35x_ops;
+
+ glue->phy = usb_phy_generic_register();
+ if (IS_ERR(glue->phy))
+ goto err7;
+ platform_set_drvdata(pdev, glue);
+
+ pinfo = am35x_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+ goto err8;
+ }
+
+ return 0;
+
+err8:
+ usb_phy_generic_unregister(glue->phy);
+
+err7:
+ clk_disable(clk);
+
+err6:
+ clk_disable(phy_clk);
+
+err5:
+ clk_put(clk);
+
+err4:
+ clk_put(phy_clk);
+
+err3:
+ kfree(glue);
+
+err0:
+ return ret;
+}
+
+static int am35x_remove(struct platform_device *pdev)
+{
+ struct am35x_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister(glue->phy);
+ clk_disable(glue->clk);
+ clk_disable(glue->phy_clk);
+ clk_put(glue->clk);
+ clk_put(glue->phy_clk);
+ kfree(glue);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int am35x_suspend(struct device *dev)
+{
+ struct am35x_glue *glue = dev_get_drvdata(dev);
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+
+ /* Shutdown the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+ data->set_phy_power(0);
+
+ clk_disable(glue->phy_clk);
+ clk_disable(glue->clk);
+
+ return 0;
+}
+
+static int am35x_resume(struct device *dev)
+{
+ struct am35x_glue *glue = dev_get_drvdata(dev);
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+ int ret;
+
+ /* Start the on-chip PHY and its PLL. */
+ if (data->set_phy_power)
+ data->set_phy_power(1);
+
+ ret = clk_enable(glue->phy_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable PHY clock\n");
+ return ret;
+ }
+
+ ret = clk_enable(glue->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(am35x_pm_ops, am35x_suspend, am35x_resume);
+
+static struct platform_driver am35x_driver = {
+ .probe = am35x_probe,
+ .remove = am35x_remove,
+ .driver = {
+ .name = "musb-am35x",
+ .pm = &am35x_pm_ops,
+ },
+};
+
+MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
+MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(am35x_driver);
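
The glue's init, interrupt and set_mode paths above all go through the omap_musb_board_data hooks carried in the platform data, so it is worth sketching what a board file is expected to supply. This is a hedged illustration only, not part of the patch; the am35x_evm_* helper names are hypothetical and the config values are placeholders.

/*
 * Hypothetical board-file sketch: what the "musb-am35x" glue expects to
 * find in musb_hdrc_platform_data.  Only the hooks actually used by the
 * glue above are shown; bodies are stubs.
 */
#include <linux/platform_device.h>
#include <linux/usb/musb.h>
#include <linux/platform_data/usb-omap.h>

static void am35x_evm_set_phy_power(u8 on)	{ /* gate the on-chip PHY/PLL */ }
static void am35x_evm_clear_irq(void)		{ /* clear the level IRQ at the SCM */ }
static void am35x_evm_musb_reset(void)		{ /* pulse the IP reset bit */ }

static struct omap_musb_board_data am35x_evm_board_data = {
	.set_phy_power	= am35x_evm_set_phy_power,
	.clear_irq	= am35x_evm_clear_irq,
	.reset		= am35x_evm_musb_reset,
};

static struct musb_hdrc_config am35x_evm_musb_config = {
	.multipoint	= true,		/* placeholder values */
	.num_eps	= 16,
	.ram_bits	= 12,
};

static struct musb_hdrc_platform_data am35x_evm_musb_pdata = {
	.mode		= MUSB_OTG,
	.power		= 250,		/* units of 2 mA -> 500 mA */
	.config		= &am35x_evm_musb_config,
	.board_data	= &am35x_evm_board_data,
	/* MMIO and IRQ resources are attached by the SoC support code
	 * when it registers the "musb-am35x" platform device. */
};
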
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 78613485209..d40d5f0b552 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -11,97 +11,156 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/list.h>
-#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+#include <linux/usb/usb_phy_generic.h>
#include <asm/cacheflush.h>
#include "musb_core.h"
+#include "musbhsdma.h"
#include "blackfin.h"
+struct bfin_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct platform_device *phy;
+};
+#define glue_to_musb(g) platform_get_drvdata(g->musb)
+
/*
* Load an endpoint's FIFO
*/
void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{
+ struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
void __iomem *epio = hw_ep->regs;
+ u8 epnum = hw_ep->epnum;
prefetch((u8 *)src);
musb_writew(epio, MUSB_TXCOUNT, len);
- DBG(4, "TX ep%d fifo %p count %d buf %p, epio %p\n",
+ dev_dbg(musb->controller, "TX ep%d fifo %p count %d buf %p, epio %p\n",
hw_ep->epnum, fifo, len, src, epio);
dump_fifo_data(src, len);
- if (unlikely((unsigned long)src & 0x01))
- outsw_8((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
- else
- outsw((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
-}
+ if (!ANOMALY_05000380 && epnum != 0) {
+ u16 dma_reg;
+
+ flush_dcache_range((unsigned long)src,
+ (unsigned long)(src + len));
+
+ /* Setup DMA address register */
+ dma_reg = (u32)src;
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
+ SSYNC();
+
+ dma_reg = (u32)src >> 16;
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
+ SSYNC();
+
+ /* Setup DMA count register */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
+ SSYNC();
+ /* Enable the DMA */
+ dma_reg = (epnum << 4) | DMA_ENA | INT_ENA | DIRECTION;
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
+ SSYNC();
+
+ /* Wait for complete */
+ while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
+ cpu_relax();
+
+ /* acknowledge dma interrupt */
+ bfin_write_USB_DMA_INTERRUPT(1 << epnum);
+ SSYNC();
+
+ /* Reset DMA */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
+ SSYNC();
+ } else {
+ SSYNC();
+
+ if (unlikely((unsigned long)src & 0x01))
+ outsw_8((unsigned long)fifo, src, (len + 1) >> 1);
+ else
+ outsw((unsigned long)fifo, src, (len + 1) >> 1);
+ }
+}
/*
* Unload an endpoint's FIFO
*/
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
+ struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
u8 epnum = hw_ep->epnum;
- u16 dma_reg = 0;
- DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
- 'R', hw_ep->epnum, fifo, len, dst);
+ if (ANOMALY_05000467 && epnum != 0) {
+ u16 dma_reg;
-#ifdef CONFIG_BF52x
- invalidate_dcache_range((unsigned int)dst,
- (unsigned int)(dst + len));
+ invalidate_dcache_range((unsigned long)dst,
+ (unsigned long)(dst + len));
- /* Setup DMA address register */
- dma_reg = (u16) ((u32) dst & 0xFFFF);
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
- SSYNC();
+ /* Setup DMA address register */
+ dma_reg = (u32)dst;
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
+ SSYNC();
- dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF);
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
- SSYNC();
+ dma_reg = (u32)dst >> 16;
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
+ SSYNC();
- /* Setup DMA count register */
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
- SSYNC();
+ /* Setup DMA count register */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
+ SSYNC();
- /* Enable the DMA */
- dma_reg = (epnum << 4) | DMA_ENA | INT_ENA;
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
- SSYNC();
+ /* Enable the DMA */
+ dma_reg = (epnum << 4) | DMA_ENA | INT_ENA;
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
+ SSYNC();
- /* Wait for compelete */
- while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
- cpu_relax();
+ /* Wait for complete */
+ while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
+ cpu_relax();
- /* acknowledge dma interrupt */
- bfin_write_USB_DMA_INTERRUPT(1 << epnum);
- SSYNC();
+ /* acknowledge dma interrupt */
+ bfin_write_USB_DMA_INTERRUPT(1 << epnum);
+ SSYNC();
- /* Reset DMA */
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
- SSYNC();
-#else
- if (unlikely((unsigned long)dst & 0x01))
- insw_8((unsigned long)fifo, dst,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
- else
- insw((unsigned long)fifo, dst,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
-#endif
+ /* Reset DMA */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
+ SSYNC();
+ } else {
+ SSYNC();
+ /* Read the last byte of packet with odd size from address fifo + 4
+ * to trigger 1 byte access to EP0 FIFO.
+ */
+ if (len == 1)
+ *dst = (u8)inw((unsigned long)fifo + 4);
+ else {
+ if (unlikely((unsigned long)dst & 0x01))
+ insw_8((unsigned long)fifo, dst, len >> 1);
+ else
+ insw((unsigned long)fifo, dst, len >> 1);
+
+ if (len & 0x01)
+ *(dst + len - 1) = (u8)inw((unsigned long)fifo + 4);
+ }
+ }
+ dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', hw_ep->epnum, fifo, len, dst);
dump_fifo_data(dst, len);
}
@@ -125,15 +184,17 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
retval = musb_interrupt(musb);
}
- spin_unlock_irqrestore(&musb->lock, flags);
+ /* Start sampling ID pin, when plug is removed from MUSB */
+ if ((musb->xceiv->state == OTG_STATE_B_IDLE
+ || musb->xceiv->state == OTG_STATE_A_WAIT_BCON) ||
+ (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ musb->a_wait_bcon = TIMER_DELAY;
+ }
- /* REVISIT we sometimes get spurious IRQs on g_ep0
- * not clear why... fall in BF54x too.
- */
- if (retval != IRQ_HANDLED)
- DBG(5, "spurious?\n");
+ spin_unlock_irqrestore(&musb->lock, flags);
- return IRQ_HANDLED;
+ return retval;
}
static void musb_conn_timer_handler(unsigned long _musb)
@@ -141,20 +202,50 @@ static void musb_conn_timer_handler(unsigned long _musb)
struct musb *musb = (void *)_musb;
unsigned long flags;
u16 val;
+ static u8 toggle;
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_IDLE:
case OTG_STATE_A_WAIT_BCON:
/* Start a new session */
val = musb_readw(musb->mregs, MUSB_DEVCTL);
+ val &= ~MUSB_DEVCTL_SESSION;
+ musb_writew(musb->mregs, MUSB_DEVCTL, val);
val |= MUSB_DEVCTL_SESSION;
musb_writew(musb->mregs, MUSB_DEVCTL, val);
+ /* Check if musb is host or peripheral. */
+ val = musb_readw(musb->mregs, MUSB_DEVCTL);
+
+ if (!(val & MUSB_DEVCTL_BDEVICE)) {
+ gpio_set_value(musb->config->gpio_vrsel, 1);
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ } else {
+ gpio_set_value(musb->config->gpio_vrsel, 0);
+ /* Ignore VBUSERROR and SUSPEND IRQ */
+ val = musb_readb(musb->mregs, MUSB_INTRUSBE);
+ val &= ~MUSB_INTR_VBUSERROR;
+ musb_writeb(musb->mregs, MUSB_INTRUSBE, val);
+ val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
+ musb_writeb(musb->mregs, MUSB_INTRUSB, val);
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ }
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ break;
+ case OTG_STATE_B_IDLE:
+ /*
+ * Start a new session. It seems that MUSB needs taking
+ * some time to recognize the type of the plug inserted?
+ */
val = musb_readw(musb->mregs, MUSB_DEVCTL);
+ val |= MUSB_DEVCTL_SESSION;
+ musb_writew(musb->mregs, MUSB_DEVCTL, val);
+ val = musb_readw(musb->mregs, MUSB_DEVCTL);
+
if (!(val & MUSB_DEVCTL_BDEVICE)) {
gpio_set_value(musb->config->gpio_vrsel, 1);
- musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
} else {
gpio_set_value(musb->config->gpio_vrsel, 0);
@@ -166,87 +257,98 @@ static void musb_conn_timer_handler(unsigned long _musb)
val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
musb_writeb(musb->mregs, MUSB_INTRUSB, val);
- val = MUSB_POWER_HSENAB;
- musb_writeb(musb->mregs, MUSB_POWER, val);
+ /* Toggle the Soft Conn bit, so that we can respond to
+ * the insertion of either an A-plug or a B-plug.
+ */
+ if (toggle) {
+ val = musb_readb(musb->mregs, MUSB_POWER);
+ val &= ~MUSB_POWER_SOFTCONN;
+ musb_writeb(musb->mregs, MUSB_POWER, val);
+ toggle = 0;
+ } else {
+ val = musb_readb(musb->mregs, MUSB_POWER);
+ val |= MUSB_POWER_SOFTCONN;
+ musb_writeb(musb->mregs, MUSB_POWER, val);
+ toggle = 1;
+ }
+ /* The delay time is set to 1/4 second by default,
+ * shortening it, if accelerating A-plug detection
+ * is needed in OTG mode.
+ */
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY / 4);
}
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
break;
-
default:
- DBG(1, "%s state not handled\n", otg_state_string(musb));
+ dev_dbg(musb->controller, "%s state not handled\n",
+ usb_otg_state_string(musb->xceiv->state));
break;
}
spin_unlock_irqrestore(&musb->lock, flags);
- DBG(4, "state is %s\n", otg_state_string(musb));
+ dev_dbg(musb->controller, "state is %s\n",
+ usb_otg_state_string(musb->xceiv->state));
}
-void musb_platform_enable(struct musb *musb)
+static void bfin_musb_enable(struct musb *musb)
{
- if (is_host_enabled(musb)) {
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
- musb->a_wait_bcon = TIMER_DELAY;
- }
+ /* REVISIT is this really correct ? */
}
-void musb_platform_disable(struct musb *musb)
+static void bfin_musb_disable(struct musb *musb)
{
}
-static void bfin_vbus_power(struct musb *musb, int is_on, int sleeping)
+static void bfin_musb_set_vbus(struct musb *musb, int is_on)
{
-}
+ int value = musb->config->gpio_vrsel_active;
+ if (!is_on)
+ value = !value;
+ gpio_set_value(musb->config->gpio_vrsel, value);
-static void bfin_set_vbus(struct musb *musb, int is_on)
-{
- if (is_on)
- gpio_set_value(musb->config->gpio_vrsel, 1);
- else
- gpio_set_value(musb->config->gpio_vrsel, 0);
-
- DBG(1, "VBUS %s, devctl %02x "
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x "
/* otg %3x conf %08x prcm %08x */ "\n",
- otg_state_string(musb),
+ usb_otg_state_string(musb->xceiv->state),
musb_readb(musb->mregs, MUSB_DEVCTL));
}
-static int bfin_set_power(struct otg_transceiver *x, unsigned mA)
+static int bfin_musb_set_power(struct usb_phy *x, unsigned mA)
{
return 0;
}
-void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
-{
- if (is_host_enabled(musb))
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
-}
-
-int musb_platform_get_vbus_status(struct musb *musb)
+static int bfin_musb_vbus_status(struct musb *musb)
{
return 0;
}
-void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode)
{
+ return -EIO;
}
-int __init musb_platform_init(struct musb *musb)
+static int bfin_musb_adjust_channel_params(struct dma_channel *channel,
+ u16 packet_sz, u8 *mode,
+ dma_addr_t *dma_addr, u32 *len)
{
+ struct musb_dma_channel *musb_channel = channel->private_data;
/*
- * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
- * and OTG HOST modes, while rev 1.1 and greater require PE7 to
- * be low for DEVICE mode and high for HOST mode. We set it high
- * here because we are in host mode
+ * Anomaly 05000450 might cause data corruption when using DMA
+ * MODE 1 transmits with short packet. So to work around this,
+ * we truncate all MODE 1 transfers down to a multiple of the
+ * max packet size, and then do the last short packet transfer
+ * (if there is any) using MODE 0.
*/
-
- if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
- printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n",
- musb->config->gpio_vrsel);
- return -ENODEV;
+ if (ANOMALY_05000450) {
+ if (musb_channel->transmit && *mode == 1)
+ *len = *len - (*len % packet_sz);
}
- gpio_direction_output(musb->config->gpio_vrsel, 0);
+ return 0;
+}
+
+static void bfin_musb_reg_init(struct musb *musb)
+{
if (ANOMALY_05000346) {
bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
SSYNC();
@@ -257,12 +359,9 @@ int __init musb_platform_init(struct musb *musb)
SSYNC();
}
- /* TODO
- * Set SIC-IVG register
- */
-
/* Configure PLL oscillator register */
- bfin_write_USB_PLLOSC_CTRL(0x30a8);
+ bfin_write_USB_PLLOSC_CTRL(0x3080 |
+ ((480/musb->config->clkin) << 1));
SSYNC();
bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
@@ -284,37 +383,203 @@ int __init musb_platform_init(struct musb *musb)
EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
SSYNC();
+}
- if (is_host_enabled(musb)) {
- musb->board_set_vbus = bfin_set_vbus;
- setup_timer(&musb_conn_timer,
- musb_conn_timer_handler, (unsigned long) musb);
+static int bfin_musb_init(struct musb *musb)
+{
+
+ /*
+ * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
+ * and OTG HOST modes, while rev 1.1 and greater require PE7 to
+ * be low for DEVICE mode and high for HOST mode. We set it high
+ * here because we are in host mode
+ */
+
+ if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
+ printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d\n",
+ musb->config->gpio_vrsel);
+ return -ENODEV;
+ }
+ gpio_direction_output(musb->config->gpio_vrsel, 0);
+
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv)) {
+ gpio_free(musb->config->gpio_vrsel);
+ return -EPROBE_DEFER;
}
- if (is_peripheral_enabled(musb))
- musb->xceiv.set_power = bfin_set_power;
+
+ bfin_musb_reg_init(musb);
+
+ setup_timer(&musb_conn_timer, musb_conn_timer_handler,
+ (unsigned long) musb);
+
+ musb->xceiv->set_power = bfin_musb_set_power;
musb->isr = blackfin_interrupt;
+ musb->double_buffer_not_ok = true;
+
+ return 0;
+}
+
+static int bfin_musb_exit(struct musb *musb)
+{
+ gpio_free(musb->config->gpio_vrsel);
+ usb_put_phy(musb->xceiv);
return 0;
}
-int musb_platform_suspend(struct musb *musb)
+static const struct musb_platform_ops bfin_ops = {
+ .init = bfin_musb_init,
+ .exit = bfin_musb_exit,
+
+ .enable = bfin_musb_enable,
+ .disable = bfin_musb_disable,
+
+ .set_mode = bfin_musb_set_mode,
+
+ .vbus_status = bfin_musb_vbus_status,
+ .set_vbus = bfin_musb_set_vbus,
+
+ .adjust_channel_params = bfin_musb_adjust_channel_params,
+};
+
+static u64 bfin_dmamask = DMA_BIT_MASK(32);
+
+static int bfin_probe(struct platform_device *pdev)
{
+ struct resource musb_resources[2];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct platform_device *musb;
+ struct bfin_glue *glue;
+
+ int ret = -ENOMEM;
+
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&pdev->dev, "failed to allocate glue context\n");
+ goto err0;
+ }
+
+ musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb) {
+ dev_err(&pdev->dev, "failed to allocate musb device\n");
+ goto err1;
+ }
+
+ musb->dev.parent = &pdev->dev;
+ musb->dev.dma_mask = &bfin_dmamask;
+ musb->dev.coherent_dma_mask = bfin_dmamask;
+
+ glue->dev = &pdev->dev;
+ glue->musb = musb;
+
+ pdata->platform_ops = &bfin_ops;
+
+ glue->phy = usb_phy_generic_register();
+ if (IS_ERR(glue->phy))
+ goto err2;
+ platform_set_drvdata(pdev, glue);
+
+ memset(musb_resources, 0x00, sizeof(*musb_resources) *
+ ARRAY_SIZE(musb_resources));
+
+ musb_resources[0].name = pdev->resource[0].name;
+ musb_resources[0].start = pdev->resource[0].start;
+ musb_resources[0].end = pdev->resource[0].end;
+ musb_resources[0].flags = pdev->resource[0].flags;
+
+ musb_resources[1].name = pdev->resource[1].name;
+ musb_resources[1].start = pdev->resource[1].start;
+ musb_resources[1].end = pdev->resource[1].end;
+ musb_resources[1].flags = pdev->resource[1].flags;
+
+ ret = platform_device_add_resources(musb, musb_resources,
+ ARRAY_SIZE(musb_resources));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add resources\n");
+ goto err3;
+ }
+
+ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add platform_data\n");
+ goto err3;
+ }
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device\n");
+ goto err3;
+ }
+
return 0;
+
+err3:
+ usb_phy_generic_unregister(glue->phy);
+
+err2:
+ platform_device_put(musb);
+
+err1:
+ kfree(glue);
+
+err0:
+ return ret;
}
-int musb_platform_resume(struct musb *musb)
+static int bfin_remove(struct platform_device *pdev)
{
+ struct bfin_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister(glue->phy);
+ kfree(glue);
+
return 0;
}
+#ifdef CONFIG_PM
+static int bfin_suspend(struct device *dev)
+{
+ struct bfin_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ if (is_host_active(musb))
+ /*
+ * During hibernate, gpio_vrsel will change from high to
+ * low, which will generate a wakeup event and resume the
+ * system immediately. Set it to 0 before hibernate to avoid this
+ * wakeup event.
+ */
+ gpio_set_value(musb->config->gpio_vrsel, 0);
-int musb_platform_exit(struct musb *musb)
+ return 0;
+}
+
+static int bfin_resume(struct device *dev)
{
+ struct bfin_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
- bfin_vbus_power(musb, 0 /*off*/, 1);
- gpio_free(musb->config->gpio_vrsel);
- musb_platform_suspend(musb);
+ bfin_musb_reg_init(musb);
return 0;
}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bfin_pm_ops, bfin_suspend, bfin_resume);
+
+static struct platform_driver bfin_driver = {
+ .probe = bfin_probe,
+ .remove = __exit_p(bfin_remove),
+ .driver = {
+ .name = "musb-blackfin",
+ .pm = &bfin_pm_ops,
+ },
+};
+
+MODULE_DESCRIPTION("Blackfin MUSB Glue Layer");
+MODULE_AUTHOR("Bryan Wy <cooloney@kernel.org>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(bfin_driver);
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
index a240c1e53d1..c84dae546dc 100644
--- a/drivers/usb/musb/blackfin.h
+++ b/drivers/usb/musb/blackfin.h
@@ -14,6 +14,43 @@
* Blackfin specific definitions
*/
+/* Anomalies notes:
+ *
+ * 05000450 - USB DMA Mode 1 Short Packet Data Corruption:
+ * MUSB driver is designed to transfer buffer of N * maxpacket size
+ * in DMA mode 1 and leave the rest of the data to the next
+ * transfer in DMA mode 0, so we never transmit a short packet in
+ * DMA mode 1.
+ *
+ * 05000463 - This anomaly doesn't affect this driver since it
+ * never uses L1 or L2 memory as data destination.
+ *
+ * 05000464 - This anomaly doesn't affect this driver since it
+ * never uses L1 or L2 memory as data source.
+ *
+ * 05000465 - The anomaly can be seen when SCLK is over 100 MHz, and there is
+ * no way to workaround for bulk endpoints. Since the wMaxPackSize
+ * of bulk is less than or equal to 512, while the fifo size of
+ * endpoint 5, 6, 7 is 1024, the double buffer mode is enabled
+ * automatically when these endpoints are used for bulk OUT.
+ *
+ * 05000466 - This anomaly doesn't affect this driver since it never mixes
+ * concurrent DMA and core accesses to the TX endpoint FIFOs.
+ *
+ * 05000467 - The workaround for this anomaly will introduce another
+ * anomaly - 05000465.
+ */
+
+/* The Mentor USB DMA engine on BF52x (silicon v0.0 and v0.1) seems to be
+ * unstable in host mode. This may be caused by Anomaly 05000380. After
+ * digging out the root cause, we will change this number accordingly.
+ * So, need to either use silicon v0.2+ or disable DMA mode in MUSB.
+ */
+#if ANOMALY_05000380 && defined(CONFIG_BF52x) && \
+ !defined(CONFIG_MUSB_PIO_ONLY)
+# error "Please use PIO mode in MUSB driver on bf52x chip v0.0 and v0.1"
+#endif
+
#undef DUMP_FIFO_DATA
#ifdef DUMP_FIFO_DATA
static void dump_fifo_data(u8 *buf, u16 len)
@@ -32,7 +69,6 @@ static void dump_fifo_data(u8 *buf, u16 len)
#define dump_fifo_data(buf, len) do {} while (0)
#endif
-#ifdef CONFIG_BF52x
#define USB_DMA_BASE USB_DMA_INTERRUPT
#define USB_DMAx_CTRL 0x04
@@ -42,7 +78,6 @@ static void dump_fifo_data(u8 *buf, u16 len)
#define USB_DMAx_COUNT_HIGH 0x14
#define USB_DMA_REG(ep, reg) (USB_DMA_BASE + 0x20 * ep + reg)
-#endif
/* Almost 1 second */
#define TIMER_DELAY (1 * HZ)
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 569ef0fed0f..904fb85d85a 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -6,6 +6,9 @@
* The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
*/
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/usb.h>
#include "musb_core.h"
@@ -102,7 +105,7 @@ static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
musb_writel(&tx->tx_complete, 0, ptr);
}
-static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
int j;
@@ -147,14 +150,11 @@ static void cppi_pool_free(struct cppi_channel *c)
c->last_processed = NULL;
}
-static int __init cppi_controller_start(struct dma_controller *c)
+static void cppi_controller_start(struct cppi *controller)
{
- struct cppi *controller;
void __iomem *tibase;
int i;
- controller = container_of(c, struct cppi, controller);
-
/* do whatever is necessary to start controller */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
controller->tx[i].transmit = true;
@@ -209,8 +209,6 @@ static int __init cppi_controller_start(struct dma_controller *c)
/* disable RNDIS mode, also host rx RNDIS autorequest */
musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
-
- return 0;
}
/*
@@ -219,13 +217,13 @@ static int __init cppi_controller_start(struct dma_controller *c)
* De-Init the DMA controller as necessary.
*/
-static int cppi_controller_stop(struct dma_controller *c)
+static void cppi_controller_stop(struct cppi *controller)
{
- struct cppi *controller;
void __iomem *tibase;
int i;
+ struct musb *musb;
- controller = container_of(c, struct cppi, controller);
+ musb = controller->musb;
tibase = controller->tibase;
/* DISABLE INDIVIDUAL CHANNEL Interrupts */
@@ -234,7 +232,7 @@ static int cppi_controller_stop(struct dma_controller *c)
musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
- DBG(1, "Tearing down RX and TX Channels\n");
+ dev_dbg(musb->controller, "Tearing down RX and TX Channels\n");
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
/* FIXME restructure of txdma to use bds like rxdma */
controller->tx[i].last_processed = NULL;
@@ -250,8 +248,6 @@ static int cppi_controller_stop(struct dma_controller *c)
/*disable tx/rx cppi */
musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
-
- return 0;
}
/* While dma channel is allocated, we only want the core irqs active
@@ -287,9 +283,11 @@ cppi_channel_allocate(struct dma_controller *c,
u8 index;
struct cppi_channel *cppi_ch;
void __iomem *tibase;
+ struct musb *musb;
controller = container_of(c, struct cppi, controller);
tibase = controller->tibase;
+ musb = controller->musb;
/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
index = ep->epnum - 1;
@@ -299,13 +297,13 @@ cppi_channel_allocate(struct dma_controller *c,
*/
if (transmit) {
if (index >= ARRAY_SIZE(controller->tx)) {
- DBG(1, "no %cX%d CPPI channel\n", 'T', index);
+ dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'T', index);
return NULL;
}
cppi_ch = controller->tx + index;
} else {
if (index >= ARRAY_SIZE(controller->rx)) {
- DBG(1, "no %cX%d CPPI channel\n", 'R', index);
+ dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'R', index);
return NULL;
}
cppi_ch = controller->rx + index;
@@ -316,12 +314,13 @@ cppi_channel_allocate(struct dma_controller *c,
* with the other DMA engine too
*/
if (cppi_ch->hw_ep)
- DBG(1, "re-allocating DMA%d %cX channel %p\n",
+ dev_dbg(musb->controller, "re-allocating DMA%d %cX channel %p\n",
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+ cppi_ch->channel.max_len = 0x7fffffff;
- DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
+ dev_dbg(musb->controller, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
}
@@ -336,7 +335,8 @@ static void cppi_channel_release(struct dma_channel *channel)
c = container_of(channel, struct cppi_channel, channel);
tibase = c->controller->tibase;
if (!c->hw_ep)
- DBG(1, "releasing idle DMA channel %p\n", c);
+ dev_dbg(c->controller->musb->controller,
+ "releasing idle DMA channel %p\n", c);
else if (!c->transmit)
core_rxirq_enable(tibase, c->index + 1);
@@ -354,10 +354,11 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
musb_ep_select(base, c->index + 1);
- DBG(level, "RX DMA%d%s: %d left, csr %04x, "
- "%08x H%08x S%08x C%08x, "
- "B%08x L%08x %08x .. %08x"
- "\n",
+ dev_dbg(c->controller->musb->controller,
+ "RX DMA%d%s: %d left, csr %04x, "
+ "%08x H%08x S%08x C%08x, "
+ "B%08x L%08x %08x .. %08x"
+ "\n",
c->index, tag,
musb_readl(c->controller->tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
@@ -384,10 +385,11 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
musb_ep_select(base, c->index + 1);
- DBG(level, "TX DMA%d%s: csr %04x, "
- "H%08x S%08x C%08x %08x, "
- "F%08x L%08x .. %08x"
- "\n",
+ dev_dbg(c->controller->musb->controller,
+ "TX DMA%d%s: csr %04x, "
+ "H%08x S%08x C%08x %08x, "
+ "F%08x L%08x .. %08x"
+ "\n",
c->index, tag,
musb_readw(c->hw_ep->regs, MUSB_TXCSR),
@@ -424,7 +426,6 @@ cppi_rndis_update(struct cppi_channel *c, int is_rx,
}
}
-#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
pr_debug("RXBD/%s %08x: "
@@ -433,21 +434,16 @@ static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
bd->hw_next, bd->hw_bufp, bd->hw_off_len,
bd->hw_options);
}
-#endif
static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
-#ifdef CONFIG_USB_MUSB_DEBUG
struct cppi_descriptor *bd;
- if (!_dbg_level(level))
- return;
cppi_dump_rx(level, rx, tag);
if (rx->last_processed)
cppi_dump_rxbd("last", rx->last_processed);
for (bd = rx->head; bd; bd = bd->next)
cppi_dump_rxbd("active", bd);
-#endif
}
@@ -503,7 +499,7 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx,
if (!(val & MUSB_RXCSR_H_REQPKT)) {
val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
musb_writew(regs, MUSB_RXCSR, val);
- /* flush writebufer */
+ /* flush writebuffer */
val = musb_readw(regs, MUSB_RXCSR);
}
}
@@ -579,6 +575,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
* trigger the "send a ZLP?" confusion.
*/
rndis = (maxpacket & 0x3f) == 0
+ && length > maxpacket
&& length < 0xffff
&& (length % maxpacket) != 0;
@@ -593,12 +590,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
length = min(n_bds * maxpacket, length);
}
- DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+ dev_dbg(musb->controller, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
tx->index,
maxpacket,
rndis ? "rndis" : "transparent",
n_bds,
- addr, length);
+ (unsigned long long)addr, length);
cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
@@ -650,7 +647,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
bd->hw_options |= CPPI_ZERO_SET;
}
- DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+ dev_dbg(musb->controller, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
@@ -739,7 +736,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
* So this module parameter lets the heuristic be disabled. When using
* gadgetfs, the heuristic will probably need to be disabled.
*/
-static int cppi_rx_rndis = 1;
+static bool cppi_rx_rndis = 1;
module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
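/*
 * Aside on the int -> bool change above: module_param(name, bool, perm)
 * expects the backing variable to really be a bool.  A minimal, hypothetical
 * standalone module using the same pattern (every name below is illustrative,
 * not part of this driver):
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_rx_rndis = true;
module_param(demo_rx_rndis, bool, 0444);
MODULE_PARM_DESC(demo_rx_rndis, "enable/disable a demo RX heuristic");

static int __init demo_init(void)
{
	/* e.g. "modprobe demo demo_rx_rndis=0" flips this to false */
	pr_info("demo_rx_rndis=%d\n", demo_rx_rndis);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");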
@@ -772,6 +769,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
void __iomem *tibase = musb->ctrl_base;
int is_rndis = 0;
struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
+ struct cppi_descriptor *d;
if (onepacket) {
/* almost every USB driver, host or peripheral side */
@@ -815,8 +813,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
length = min(n_bds * maxpacket, length);
- DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
- "dma 0x%x len %u %u/%u\n",
+ dev_dbg(musb->controller, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+ "dma 0x%llx len %u %u/%u\n",
rx->index, maxpacket,
onepacket
? (is_rndis ? "rndis" : "onepacket")
@@ -825,7 +823,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff,
- addr, length, rx->channel.actual_len, rx->buf_len);
+ (unsigned long long)addr, length,
+ rx->channel.actual_len, rx->buf_len);
/* only queue one segment at a time, since the hardware prevents
* correct queue shutdown after unexpected short packets
@@ -884,14 +883,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
bd->hw_options |= CPPI_SOP_SET;
tail->hw_options |= CPPI_EOP_SET;
-#ifdef CONFIG_USB_MUSB_DEBUG
- if (_dbg_level(5)) {
- struct cppi_descriptor *d;
-
- for (d = rx->head; d; d = d->next)
- cppi_dump_rxbd("S", d);
- }
-#endif
+ for (d = rx->head; d; d = d->next)
+ cppi_dump_rxbd("S", d);
/* in case the preceding transfer left some state... */
tail = rx->last_processed;
@@ -931,7 +924,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff;
if (i < (2 + n_bds)) {
- DBG(2, "bufcnt%d underrun - %d (for %d)\n",
+ dev_dbg(musb->controller, "bufcnt%d underrun - %d (for %d)\n",
rx->index, i, n_bds);
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
@@ -980,7 +973,7 @@ static int cppi_channel_program(struct dma_channel *ch,
/* WARN_ON(1); */
break;
case MUSB_DMA_STATUS_UNKNOWN:
- DBG(1, "%cX DMA%d not allocated!\n",
+ dev_dbg(musb->controller, "%cX DMA%d not allocated!\n",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* FALLTHROUGH */
@@ -1017,6 +1010,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
int i;
dma_addr_t safe2ack;
void __iomem *regs = rx->hw_ep->regs;
+ struct musb *musb = cppi->musb;
cppi_dump_rx(6, rx, "/K");
@@ -1035,9 +1029,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
if (!completed && (bd->hw_options & CPPI_OWN_SET))
break;
- DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+ dev_dbg(musb->controller, "C/RXBD %llx: nxt %08x buf %08x "
"off.len %08x opt.len %08x (%d)\n",
- bd->dma, bd->hw_next, bd->hw_bufp,
+ (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options,
rx->channel.actual_len);
@@ -1057,7 +1051,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
* CPPI ignores those BDs even though OWN is still set.
*/
completed = true;
- DBG(3, "rx short %d/%d (%d)\n",
+ dev_dbg(musb->controller, "rx short %d/%d (%d)\n",
len, bd->buflen,
rx->channel.actual_len);
}
@@ -1107,11 +1101,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
musb_ep_select(cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) {
- DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+ dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n",
rx->index,
rx->head, rx->tail,
rx->last_processed
- ? rx->last_processed->dma
+ ? (unsigned long long)
+ rx->last_processed->dma
: 0,
completed ? ", completed" : "",
csr);
@@ -1144,17 +1139,33 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
return completed;
}
-void cppi_completion(struct musb *musb, u32 rx, u32 tx)
+irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
- void __iomem *tibase;
- int i, index;
+ struct musb *musb = dev_id;
struct cppi *cppi;
+ void __iomem *tibase;
struct musb_hw_ep *hw_ep = NULL;
+ u32 rx, tx;
+ int i, index;
+ unsigned long uninitialized_var(flags);
cppi = container_of(musb->dma_controller, struct cppi, controller);
+ if (cppi->irq)
+ spin_lock_irqsave(&musb->lock, flags);
tibase = musb->ctrl_base;
+ tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+ rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+ if (!tx && !rx) {
+ if (cppi->irq)
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return IRQ_NONE;
+ }
+
+ dev_dbg(musb->controller, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
+
/* process TX channels */
for (index = 0; tx; tx = tx >> 1, index++) {
struct cppi_channel *tx_ch;
@@ -1176,8 +1187,13 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
bd = tx_ch->head;
+ /*
+	 * If the head is NULL, this could be an abort interrupt
+	 * that needs to be acknowledged.
+ */
if (NULL == bd) {
- DBG(1, "null BD\n");
+ dev_dbg(musb->controller, "null BD\n");
+ musb_writel(&tx_ram->tx_complete, 0, 0);
continue;
}
@@ -1191,7 +1207,7 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
if (bd->hw_options & CPPI_OWN_SET)
break;
- DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
+ dev_dbg(musb->controller, "C/TXBD %p n %x b %x off %x opt %x\n",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
@@ -1228,27 +1244,7 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
hw_ep = tx_ch->hw_ep;
- /* Peripheral role never repurposes the
- * endpoint, so immediate completion is
- * safe. Host role waits for the fifo
- * to empty (TXPKTRDY irq) before going
- * to the next queued bulk transfer.
- */
- if (is_host_active(cppi->musb)) {
-#if 0
- /* WORKAROUND because we may
- * not always get TXKPTRDY ...
- */
- int csr;
-
- csr = musb_readw(hw_ep->regs,
- MUSB_TXCSR);
- if (csr & MUSB_TXCSR_TXPKTRDY)
-#endif
- completed = false;
- }
- if (completed)
- musb_dma_completion(musb, index + 1, 1);
+ musb_dma_completion(musb, index + 1, 1);
} else {
/* Bigger transfer than we could fit in
@@ -1292,13 +1288,21 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
/* write to CPPI EOI register to re-enable interrupts */
musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+
+ if (cppi->irq)
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(cppi_interrupt);
/* Instantiate a software object representing a DMA controller. */
-struct dma_controller *__init
-dma_controller_create(struct musb *musb, void __iomem *mregs)
+struct dma_controller *dma_controller_create(struct musb *musb, void __iomem *mregs)
{
struct cppi *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq_byname(pdev, "dma");
controller = kzalloc(sizeof *controller, GFP_KERNEL);
if (!controller)
@@ -1308,8 +1312,6 @@ dma_controller_create(struct musb *musb, void __iomem *mregs)
controller->tibase = mregs - DAVINCI_BASE_OFFSET;
controller->musb = musb;
- controller->controller.start = cppi_controller_start;
- controller->controller.stop = cppi_controller_stop;
controller->controller.channel_alloc = cppi_channel_allocate;
controller->controller.channel_release = cppi_channel_release;
controller->controller.channel_program = cppi_channel_program;
@@ -1329,6 +1331,16 @@ dma_controller_create(struct musb *musb, void __iomem *mregs)
return NULL;
}
+ if (irq > 0) {
+ if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+ dma_controller_destroy(&controller->controller);
+ return NULL;
+ }
+ controller->irq = irq;
+ }
+
+ cppi_controller_start(controller);
return &controller->controller;
}
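/*
 * The platform_get_irq_byname(pdev, "dma") lookup above only works if the
 * glue/board code registered an IRQ resource named "dma" next to the usual
 * "mc" core IRQ.  A minimal, hypothetical resource table (the addresses and
 * IRQ numbers are placeholders, not real DaVinci/DA8xx values):
 */
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource example_usb_resources[] = {
	{
		.start	= 0x01c64000,	/* controller registers (placeholder) */
		.end	= 0x01c64fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 58,		/* MUSB core IRQ (placeholder) */
		.flags	= IORESOURCE_IRQ,
		.name	= "mc",
	},
	{
		.start	= 59,		/* dedicated CPPI DMA IRQ (placeholder) */
		.flags	= IORESOURCE_IRQ,
		.name	= "dma",
	},
};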
@@ -1341,6 +1353,11 @@ void dma_controller_destroy(struct dma_controller *c)
cppi = container_of(c, struct cppi, controller);
+ cppi_controller_stop(cppi);
+
+ if (cppi->irq)
+ free_irq(cppi->irq, cppi->musb);
+
/* assert: caller stopped the controller first */
dma_pool_destroy(cppi->pool);
@@ -1397,15 +1414,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
if (cppi_ch->transmit) {
struct cppi_tx_stateram __iomem *tx_ram;
- int enabled;
-
- /* mask interrupts raised to signal teardown complete. */
- enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
- & (1 << cppi_ch->index);
- if (enabled)
- musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
- (1 << cppi_ch->index));
-
/* REVISIT put timeouts on these controller handshakes */
cppi_dump_tx(6, cppi_ch, " (teardown)");
@@ -1420,7 +1428,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
do {
value = musb_readl(&tx_ram->tx_complete, 0);
} while (0xFFFFFFFC != value);
- musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
/* FIXME clean up the transfer state ... here?
* the completion routine should get called with
@@ -1433,23 +1440,15 @@ static int cppi_channel_abort(struct dma_channel *channel)
musb_writew(regs, MUSB_TXCSR, value);
musb_writew(regs, MUSB_TXCSR, value);
- /* re-enable interrupt */
- if (enabled)
- musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
- (1 << cppi_ch->index));
-
- /* While we scrub the TX state RAM, ensure that we clean
- * up any interrupt that's currently asserted:
+ /*
* 1. Write to completion Ptr value 0x1(bit 0 set)
* (write back mode)
- * 2. Write to completion Ptr value 0x0(bit 0 cleared)
- * (compare mode)
- * Value written is compared(for bits 31:2) and when
- * equal, interrupt is deasserted.
+	 * 2. Wait for the abort interrupt and then put the channel in
+	 *    compare mode by writing 1 to the tx_complete register.
*/
cppi_reset_tx(tx_ram, 1);
- musb_writel(&tx_ram->tx_complete, 0, 0);
-
+ cppi_ch->head = NULL;
+ musb_writel(&tx_ram->tx_complete, 0, 1);
cppi_dump_tx(5, cppi_ch, " (done teardown)");
/* REVISIT tx side _should_ clean up the same way
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index 729b4071787..59bf949e589 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -5,7 +5,6 @@
#include <linux/slab.h>
#include <linux/list.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/dmapool.h>
@@ -119,6 +118,8 @@ struct cppi {
void __iomem *mregs; /* Mentor regs */
void __iomem *tibase; /* TI/CPPI regs */
+ int irq;
+
struct cppi_channel tx[4];
struct cppi_channel rx[4];
@@ -127,7 +128,7 @@ struct cppi {
struct list_head tx_complete;
};
-/* irq handling hook */
-extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+/* CPPI IRQ handler */
+extern irqreturn_t cppi_interrupt(int, void *);
#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
new file mode 100644
index 00000000000..058775e647a
--- /dev/null
+++ b/drivers/usb/musb/da8xx.c
@@ -0,0 +1,590 @@
+/*
+ * Texas Instruments DA8xx/OMAP-L1x "glue layer"
+ *
+ * Copyright (c) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Based on the DaVinci "glue layer" code.
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/usb_phy_generic.h>
+
+#include <mach/da8xx.h>
+#include <linux/platform_data/usb-davinci.h>
+
+#include "musb_core.h"
+
+/*
+ * DA8XX specific definitions
+ */
+
+/* USB 2.0 OTG module registers */
+#define DA8XX_USB_REVISION_REG 0x00
+#define DA8XX_USB_CTRL_REG 0x04
+#define DA8XX_USB_STAT_REG 0x08
+#define DA8XX_USB_EMULATION_REG 0x0c
+#define DA8XX_USB_MODE_REG 0x10 /* Transparent, CDC, [Generic] RNDIS */
+#define DA8XX_USB_AUTOREQ_REG 0x14
+#define DA8XX_USB_SRP_FIX_TIME_REG 0x18
+#define DA8XX_USB_TEARDOWN_REG 0x1c
+#define DA8XX_USB_INTR_SRC_REG 0x20
+#define DA8XX_USB_INTR_SRC_SET_REG 0x24
+#define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28
+#define DA8XX_USB_INTR_MASK_REG 0x2c
+#define DA8XX_USB_INTR_MASK_SET_REG 0x30
+#define DA8XX_USB_INTR_MASK_CLEAR_REG 0x34
+#define DA8XX_USB_INTR_SRC_MASKED_REG 0x38
+#define DA8XX_USB_END_OF_INTR_REG 0x3c
+#define DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2))
+
+/* Control register bits */
+#define DA8XX_SOFT_RESET_MASK 1
+
+#define DA8XX_USB_TX_EP_MASK 0x1f /* EP0 + 4 Tx EPs */
+#define DA8XX_USB_RX_EP_MASK 0x1e /* 4 Rx EPs */
+
+/* USB interrupt register bits */
+#define DA8XX_INTR_USB_SHIFT 16
+#define DA8XX_INTR_USB_MASK (0x1ff << DA8XX_INTR_USB_SHIFT) /* 8 Mentor */
+ /* interrupts and DRVVBUS interrupt */
+#define DA8XX_INTR_DRVVBUS 0x100
+#define DA8XX_INTR_RX_SHIFT 8
+#define DA8XX_INTR_RX_MASK (DA8XX_USB_RX_EP_MASK << DA8XX_INTR_RX_SHIFT)
+#define DA8XX_INTR_TX_SHIFT 0
+#define DA8XX_INTR_TX_MASK (DA8XX_USB_TX_EP_MASK << DA8XX_INTR_TX_SHIFT)
+
+#define DA8XX_MENTOR_CORE_OFFSET 0x400
+
+#define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG)
+
+struct da8xx_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct platform_device *phy;
+ struct clk *clk;
+};
+
+/*
+ * REVISIT (PM): we should be able to keep the PHY in low power mode most
+ * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0
+ * and, when in host mode, autosuspending idle root ports... PHY_PLLON
+ * (overriding SUSPENDM?) then likely needs to stay off.
+ */
+
+static inline void phy_on(void)
+{
+ u32 cfgchip2 = __raw_readl(CFGCHIP2);
+
+ /*
+ * Start the on-chip PHY and its PLL.
+ */
+ cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN);
+ cfgchip2 |= CFGCHIP2_PHY_PLLON;
+ __raw_writel(cfgchip2, CFGCHIP2);
+
+ pr_info("Waiting for USB PHY clock good...\n");
+ while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
+ cpu_relax();
+}
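/*
 * phy_on() above spins on PHYCLKGD with no upper bound.  A hedged sketch of
 * a bounded variant, reusing the CFGCHIP2 helpers defined earlier and
 * assuming <linux/jiffies.h> and <linux/errno.h> are available; the 100 ms
 * limit is an arbitrary assumption, not a documented hardware figure:
 */
static inline int phy_on_bounded(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	u32 cfgchip2 = __raw_readl(CFGCHIP2);

	cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN);
	cfgchip2 |= CFGCHIP2_PHY_PLLON;
	__raw_writel(cfgchip2, CFGCHIP2);

	while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}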
+
+static inline void phy_off(void)
+{
+ u32 cfgchip2 = __raw_readl(CFGCHIP2);
+
+ /*
+	 * If the USB 1.1 reference clock is being sourced from the USB 2.0
+	 * PHY, we must not power the PHY down.
+ */
+ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX) &&
+ (cfgchip2 & CFGCHIP2_USB1SUSPENDM)) {
+ pr_warning("USB 1.1 clocked from USB 2.0 PHY -- "
+ "can't power it down\n");
+ return;
+ }
+
+ /*
+ * Power down the on-chip PHY.
+ */
+ cfgchip2 |= CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN;
+ __raw_writel(cfgchip2, CFGCHIP2);
+}
+
+/*
+ * Because we don't set CTRL.UINT, it's "important" to:
+ * - not read/write INTRUSB/INTRUSBE (except during
+ * initial setup, as a workaround);
+ * - use INTSET/INTCLR instead.
+ */
+
+/**
+ * da8xx_musb_enable - enable interrupts
+ */
+static void da8xx_musb_enable(struct musb *musb)
+{
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 mask;
+
+ /* Workaround: setup IRQs through both register sets. */
+ mask = ((musb->epmask & DA8XX_USB_TX_EP_MASK) << DA8XX_INTR_TX_SHIFT) |
+ ((musb->epmask & DA8XX_USB_RX_EP_MASK) << DA8XX_INTR_RX_SHIFT) |
+ DA8XX_INTR_USB_MASK;
+ musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask);
+
+ /* Force the DRVVBUS IRQ so we can start polling for ID change. */
+ musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG,
+ DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT);
+}
+
+/**
+ * da8xx_musb_disable - disable HDRC and flush interrupts
+ */
+static void da8xx_musb_disable(struct musb *musb)
+{
+ void __iomem *reg_base = musb->ctrl_base;
+
+ musb_writel(reg_base, DA8XX_USB_INTR_MASK_CLEAR_REG,
+ DA8XX_INTR_USB_MASK |
+ DA8XX_INTR_TX_MASK | DA8XX_INTR_RX_MASK);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
+}
+
+#define portstate(stmt) stmt
+
+static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
+{
+ WARN_ON(is_on && is_peripheral_active(musb));
+}
+
+#define POLL_SECONDS 2
+
+static struct timer_list otg_workaround;
+
+static void otg_timer(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+ /*
+	 * We poll because the DaVinci family doesn't expose several
+	 * OTG-critical status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->state));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ /*
+ * Wait till VBUS falls below SessionEnd (~0.2 V); the 1.3
+ * RTL seems to mis-handle session "start" otherwise (or in
+ * our case "recover"), in routine "VBUS was valid by the time
+ * VBUSERR got reported during enumeration" cases.
+ */
+ if (devctl & MUSB_DEVCTL_VBUS) {
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ break;
+ }
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG,
+ MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT);
+ break;
+ case OTG_STATE_B_IDLE:
+ /*
+ * There's no ID-changed IRQ, so we have no good way to tell
+ * when to switch to the A-Default state machine (by setting
+ * the DEVCTL.Session bit).
+ *
+ * Workaround: whenever we're in B_IDLE, try setting the
+ * session flag every few seconds. If it works, ID was
+ * grounded and we're now in the A-Default state machine.
+ *
+ * NOTE: setting the session flag is _supposed_ to trigger
+ * SRP but clearly it doesn't.
+ */
+ musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || (musb->a_wait_bcon == 0 &&
+ musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->state));
+ del_timer(&otg_workaround);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
+ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
+ return;
+ }
+ last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
+ usb_otg_state_string(musb->xceiv->state),
+ jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&otg_workaround, timeout);
+}
+
+static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
+{
+ struct musb *musb = hci;
+ void __iomem *reg_base = musb->ctrl_base;
+ struct usb_otg *otg = musb->xceiv->otg;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ u32 status;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /*
+ * NOTE: DA8XX shadows the Mentor IRQs. Don't manage them through
+	 * the Mentor registers (except for setup); use the TI ones and EOI.
+ */
+
+ /* Acknowledge and handle non-CPPI interrupts */
+ status = musb_readl(reg_base, DA8XX_USB_INTR_SRC_MASKED_REG);
+ if (!status)
+ goto eoi;
+
+ musb_writel(reg_base, DA8XX_USB_INTR_SRC_CLEAR_REG, status);
+ dev_dbg(musb->controller, "USB IRQ %08x\n", status);
+
+ musb->int_rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT;
+ musb->int_tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT;
+ musb->int_usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT;
+
+ /*
+ * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
+ * DA8xx's missing ID change IRQ. We need an ID change IRQ to
+ * switch appropriately between halves of the OTG state machine.
+ * Managing DEVCTL.Session per Mentor docs requires that we know its
+ * value but DEVCTL.BDevice is invalid without DEVCTL.Session set.
+ * Also, DRVVBUS pulses for SRP (but not at 5 V)...
+ */
+ if (status & (DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT)) {
+ int drvvbus = musb_readl(reg_base, DA8XX_USB_STAT_REG);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+ int err;
+
+ err = musb->int_usb & MUSB_INTR_VBUSERROR;
+ if (err) {
+ /*
+ * The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"-starting sessions
+ * without waiting for VBUS to stop registering in
+ * devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (drvvbus) {
+ MUSB_HST_MODE(musb);
+ otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ del_timer(&otg_workaround);
+ } else {
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ otg->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+ }
+
+ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ usb_otg_state_string(musb->xceiv->state),
+ err ? " ERROR" : "",
+ devctl);
+ ret = IRQ_HANDLED;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ ret |= musb_interrupt(musb);
+
+ eoi:
+ /* EOI needs to be written for the IRQ to be re-asserted. */
+ if (ret == IRQ_HANDLED || status)
+ musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
+
+ /* Poll for ID change */
+ if (musb->xceiv->state == OTG_STATE_B_IDLE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
+{
+ u32 cfgchip2 = __raw_readl(CFGCHIP2);
+
+ cfgchip2 &= ~CFGCHIP2_OTGMODE;
+ switch (musb_mode) {
+ case MUSB_HOST: /* Force VBUS valid, ID = 0 */
+ cfgchip2 |= CFGCHIP2_FORCE_HOST;
+ break;
+ case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
+ cfgchip2 |= CFGCHIP2_FORCE_DEVICE;
+ break;
+ case MUSB_OTG: /* Don't override the VBUS/ID comparators */
+ cfgchip2 |= CFGCHIP2_NO_OVERRIDE;
+ break;
+ default:
+ dev_dbg(musb->controller, "Trying to set unsupported mode %u\n", musb_mode);
+ }
+
+ __raw_writel(cfgchip2, CFGCHIP2);
+ return 0;
+}
+
+static int da8xx_musb_init(struct musb *musb)
+{
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 rev;
+ int ret = -ENODEV;
+
+ musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
+
+ /* Returns zero if e.g. not clocked */
+ rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
+ if (!rev)
+ goto fail;
+
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv)) {
+ ret = -EPROBE_DEFER;
+ goto fail;
+ }
+
+ setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
+
+ /* Reset the controller */
+ musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
+
+ /* Start the on-chip PHY and its PLL. */
+ phy_on();
+
+ msleep(5);
+
+ /* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */
+ pr_debug("DA8xx OTG revision %08x, PHY %03x, control %02x\n",
+ rev, __raw_readl(CFGCHIP2),
+ musb_readb(reg_base, DA8XX_USB_CTRL_REG));
+
+ musb->isr = da8xx_musb_interrupt;
+ return 0;
+fail:
+ return ret;
+}
+
+static int da8xx_musb_exit(struct musb *musb)
+{
+ del_timer_sync(&otg_workaround);
+
+ phy_off();
+
+ usb_put_phy(musb->xceiv);
+
+ return 0;
+}
+
+static const struct musb_platform_ops da8xx_ops = {
+ .init = da8xx_musb_init,
+ .exit = da8xx_musb_exit,
+
+ .enable = da8xx_musb_enable,
+ .disable = da8xx_musb_disable,
+
+ .set_mode = da8xx_musb_set_mode,
+ .try_idle = da8xx_musb_try_idle,
+
+ .set_vbus = da8xx_musb_set_vbus,
+};
+
+static const struct platform_device_info da8xx_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
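/*
 * The probe routine below expects board code to hand in musb_hdrc_platform_data
 * whose platform_ops it points at da8xx_ops.  A minimal, hypothetical example
 * of such data (all values are placeholders, not a real board configuration):
 */
static struct musb_hdrc_config example_da8xx_config = {
	.multipoint	= 1,
	.num_eps	= 5,		/* EP0 + 4 Tx/Rx endpoints (placeholder) */
	.ram_bits	= 10,		/* placeholder FIFO RAM size exponent */
};

static struct musb_hdrc_platform_data example_da8xx_pdata = {
	.mode		= MUSB_OTG,
	.power		= 255,		/* VBUS budget in 2 mA units (placeholder) */
	.config		= &example_da8xx_config,
};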
+
+static int da8xx_probe(struct platform_device *pdev)
+{
+ struct resource musb_resources[2];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct platform_device *musb;
+ struct da8xx_glue *glue;
+ struct platform_device_info pinfo;
+ struct clk *clk;
+
+ int ret = -ENOMEM;
+
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&pdev->dev, "failed to allocate glue context\n");
+ goto err0;
+ }
+
+ clk = clk_get(&pdev->dev, "usb20");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err3;
+ }
+
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err4;
+ }
+
+ glue->dev = &pdev->dev;
+ glue->clk = clk;
+
+ pdata->platform_ops = &da8xx_ops;
+
+ glue->phy = usb_phy_generic_register();
+ if (IS_ERR(glue->phy)) {
+ ret = PTR_ERR(glue->phy);
+ goto err5;
+ }
+ platform_set_drvdata(pdev, glue);
+
+ memset(musb_resources, 0x00, sizeof(*musb_resources) *
+ ARRAY_SIZE(musb_resources));
+
+ musb_resources[0].name = pdev->resource[0].name;
+ musb_resources[0].start = pdev->resource[0].start;
+ musb_resources[0].end = pdev->resource[0].end;
+ musb_resources[0].flags = pdev->resource[0].flags;
+
+ musb_resources[1].name = pdev->resource[1].name;
+ musb_resources[1].start = pdev->resource[1].start;
+ musb_resources[1].end = pdev->resource[1].end;
+ musb_resources[1].flags = pdev->resource[1].flags;
+
+ pinfo = da8xx_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = musb_resources;
+ pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+ goto err6;
+ }
+
+ return 0;
+
+err6:
+ usb_phy_generic_unregister(glue->phy);
+
+err5:
+ clk_disable(clk);
+
+err4:
+ clk_put(clk);
+
+err3:
+ kfree(glue);
+
+err0:
+ return ret;
+}
+
+static int da8xx_remove(struct platform_device *pdev)
+{
+ struct da8xx_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister(glue->phy);
+ clk_disable(glue->clk);
+ clk_put(glue->clk);
+ kfree(glue);
+
+ return 0;
+}
+
+static struct platform_driver da8xx_driver = {
+ .probe = da8xx_probe,
+ .remove = da8xx_remove,
+ .driver = {
+ .name = "musb-da8xx",
+ },
+};
+
+MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer");
+MODULE_AUTHOR("Sergei Shtylyov <sshtylyov@ru.mvista.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(da8xx_driver);
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 2dc7606f319..de8492b06e4 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -24,30 +24,40 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/usb_phy_generic.h>
+#include <mach/cputype.h>
#include <mach/hardware.h>
-#include <mach/memory.h>
-#include <mach/gpio.h>
#include <asm/mach-types.h>
#include "musb_core.h"
#ifdef CONFIG_MACH_DAVINCI_EVM
-#define GPIO_nVBUS_DRV 87
+#define GPIO_nVBUS_DRV 160
#endif
#include "davinci.h"
#include "cppi_dma.h"
+#define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR)
+#define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR)
+
+struct davinci_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct clk *clk;
+};
+
/* REVISIT (PM) we should be able to keep the PHY in low power mode most
* of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
* and, when in host mode, autosuspending idle root ports... PHYPLLON
@@ -56,25 +66,31 @@
static inline void phy_on(void)
{
- /* start the on-chip PHY and its PLL */
- __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
- (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR));
- while ((__raw_readl((void __force __iomem *)
- IO_ADDRESS(USBPHY_CTL_PADDR))
- & USBPHY_PHYCLKGD) == 0)
+ u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
+
+ /* power everything up; start the on-chip PHY and its PLL */
+ phy_ctrl &= ~(USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN);
+ phy_ctrl |= USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON;
+ __raw_writel(phy_ctrl, USB_PHY_CTRL);
+
+ /* wait for PLL to lock before proceeding */
+ while ((__raw_readl(USB_PHY_CTRL) & USBPHY_PHYCLKGD) == 0)
cpu_relax();
}
static inline void phy_off(void)
{
- /* powerdown the on-chip PHY and its oscillator */
- __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *)
- IO_ADDRESS(USBPHY_CTL_PADDR));
+ u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
+
+ /* powerdown the on-chip PHY, its PLL, and the OTG block */
+ phy_ctrl &= ~(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON);
+ phy_ctrl |= USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN;
+ __raw_writel(phy_ctrl, USB_PHY_CTRL);
}
static int dma_off = 1;
-void musb_platform_enable(struct musb *musb)
+static void davinci_musb_enable(struct musb *musb)
{
u32 tmp, old, val;
@@ -99,15 +115,14 @@ void musb_platform_enable(struct musb *musb)
dma_off = 0;
/* force a DRVVBUS irq so we can start polling for ID change */
- if (is_otg_enabled(musb))
- musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
}
/*
* Disable the HDRC and flush interrupts
*/
-void musb_platform_disable(struct musb *musb)
+static void davinci_musb_disable(struct musb *musb)
{
/* because we don't set CTRLR.UINT, "important" to:
* - not read/write INTRUSB/INTRUSBE
@@ -126,21 +141,21 @@ void musb_platform_disable(struct musb *musb)
}
-/* REVISIT it's not clear whether DaVinci can support full OTG. */
-
-static int vbus_state = -1;
-
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
#define portstate(stmt) stmt
-#else
-#define portstate(stmt)
-#endif
-
-/* VBUS SWITCHING IS BOARD-SPECIFIC */
+/*
+ * VBUS SWITCHING IS BOARD-SPECIFIC ... at least for the DM6446 EVM,
+ * which doesn't wire DRVVBUS to the FET that switches it. Unclear
+ * if that's a problem with the DM6446 chip or just with that board.
+ *
+ * In either case, the DM355 EVM automates DRVVBUS the normal way,
+ * when J10 is out, and TI documents it as handling OTG.
+ */
#ifdef CONFIG_MACH_DAVINCI_EVM
+static int vbus_state = -1;
+
/* I2C operations are always synchronous, and require a task context.
* With unloaded systems, using the shared workqueue seems to suffice
* to satisfy the 100msec A_WAIT_VRISE timeout...
@@ -150,12 +165,12 @@ static void evm_deferred_drvvbus(struct work_struct *ignored)
gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
vbus_state = !vbus_state;
}
-static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
#endif /* EVM */
-static void davinci_source_power(struct musb *musb, int is_on, int immediate)
+static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate)
{
+#ifdef CONFIG_MACH_DAVINCI_EVM
if (is_on)
is_on = 1;
@@ -163,22 +178,23 @@ static void davinci_source_power(struct musb *musb, int is_on, int immediate)
return;
vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
-#ifdef CONFIG_MACH_DAVINCI_EVM
if (machine_is_davinci_evm()) {
+ static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
+
if (immediate)
gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
else
schedule_work(&evm_vbus_work);
}
-#endif
if (immediate)
vbus_state = is_on;
+#endif
}
-static void davinci_set_vbus(struct musb *musb, int is_on)
+static void davinci_musb_set_vbus(struct musb *musb, int is_on)
{
WARN_ON(is_on && is_peripheral_active(musb));
- davinci_source_power(musb, is_on, 0);
+ davinci_musb_source_power(musb, is_on, 0);
}
@@ -197,10 +213,11 @@ static void otg_timer(unsigned long _musb)
* status change events (from the transceiver) otherwise.
*/
devctl = musb_readb(mregs, MUSB_DEVCTL);
- DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
+ dev_dbg(musb->controller, "poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->state));
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_VFALL:
/* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
* seems to mis-handle session "start" otherwise (or in our
@@ -211,15 +228,13 @@ static void otg_timer(unsigned long _musb)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
break;
}
- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
break;
case OTG_STATE_B_IDLE:
- if (!is_peripheral_enabled(musb))
- break;
-
- /* There's no ID-changed IRQ, so we have no good way to tell
+ /*
+ * There's no ID-changed IRQ, so we have no good way to tell
* when to switch to the A-Default state machine (by setting
* the DEVCTL.SESSION flag).
*
@@ -236,7 +251,7 @@ static void otg_timer(unsigned long _musb)
if (devctl & MUSB_DEVCTL_BDEVICE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
else
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
break;
default:
break;
@@ -244,12 +259,14 @@ static void otg_timer(unsigned long _musb)
spin_unlock_irqrestore(&musb->lock, flags);
}
-static irqreturn_t davinci_interrupt(int irq, void *__hci)
+static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
{
unsigned long flags;
irqreturn_t retval = IRQ_NONE;
struct musb *musb = __hci;
+ struct usb_otg *otg = musb->xceiv->otg;
void __iomem *tibase = musb->ctrl_base;
+ struct cppi *cppi;
u32 tmp;
spin_lock_irqsave(&musb->lock, flags);
@@ -257,7 +274,7 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
/* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through
* the Mentor registers (except for setup), use the TI ones and EOI.
*
- * Docs describe irq "vector" registers asociated with the CPPI and
+ * Docs describe irq "vector" registers associated with the CPPI and
* USB EOI registers. These hold a bitmask corresponding to the
* current IRQ, not an irq handler address. Would using those bits
* resolve some of the races observed in this dispatch code??
@@ -266,21 +283,14 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
/* CPPI interrupts share the same IRQ line, but have their own
* mask, state, "vector", and EOI registers.
*/
- if (is_cppi_enabled()) {
- u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
- u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
-
- if (cppi_tx || cppi_rx) {
- DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
- cppi_completion(musb, cppi_rx, cppi_tx);
- retval = IRQ_HANDLED;
- }
- }
+ cppi = container_of(musb->dma_controller, struct cppi, controller);
+ if (is_cppi_enabled() && musb->dma_controller && !cppi->irq)
+ retval = cppi_interrupt(irq, __hci);
/* ack and handle non-CPPI interrupts */
tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
- DBG(4, "IRQ %08x\n", tmp);
+ dev_dbg(musb->controller, "IRQ %08x\n", tmp);
musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
>> DAVINCI_USB_RXINT_SHIFT;
@@ -302,8 +312,7 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
int err = musb->int_usb & MUSB_INTR_VBUSERROR;
- err = is_host_enabled(musb)
- && (musb->int_usb & MUSB_INTR_VBUSERROR);
+ err = musb->int_usb & MUSB_INTR_VBUSERROR;
if (err) {
/* The Mentor core doesn't debounce VBUS as needed
* to cope with device connect current spikes. This
@@ -316,29 +325,30 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
* to stop registering in devctl.
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
- musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
- } else if (is_host_enabled(musb) && drvvbus) {
- musb->is_active = 1;
+ } else if (drvvbus) {
MUSB_HST_MODE(musb);
- musb->xceiv.default_a = 1;
- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
del_timer(&otg_workaround);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
- musb->xceiv.default_a = 0;
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ otg->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
}
- /* NOTE: this must complete poweron within 100 msec */
- davinci_source_power(musb, drvvbus, 0);
- DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
+ /* NOTE: this must complete poweron within 100 msec
+ * (OTG_TIME_A_WAIT_VRISE) but we don't check for that.
+ */
+ davinci_musb_source_power(musb, drvvbus, 0);
+ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
drvvbus ? "on" : "off",
- otg_state_string(musb),
+ usb_otg_state_string(musb->xceiv->state),
err ? " ERROR" : "",
devctl);
retval = IRQ_HANDLED;
@@ -351,45 +361,63 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
/* poll for ID change */
- if (is_otg_enabled(musb)
- && musb->xceiv.state == OTG_STATE_B_IDLE)
+ if (musb->xceiv->state == OTG_STATE_B_IDLE)
mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
- /* REVISIT we sometimes get unhandled IRQs
- * (e.g. ep0). not clear why...
- */
- if (retval != IRQ_HANDLED)
- DBG(5, "unhandled? %08x\n", tmp);
- return IRQ_HANDLED;
+ return retval;
}
-int musb_platform_set_mode(struct musb *musb, u8 mode)
+static int davinci_musb_set_mode(struct musb *musb, u8 mode)
{
/* EVM can't do this (right?) */
return -EIO;
}
-int __init musb_platform_init(struct musb *musb)
+static int davinci_musb_init(struct musb *musb)
{
void __iomem *tibase = musb->ctrl_base;
u32 revision;
+ int ret = -ENODEV;
- musb->mregs += DAVINCI_BASE_OFFSET;
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv)) {
+ ret = -EPROBE_DEFER;
+ goto unregister;
+ }
- clk_enable(musb->clock);
+ musb->mregs += DAVINCI_BASE_OFFSET;
/* returns zero if e.g. not clocked */
revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
if (revision == 0)
- return -ENODEV;
+ goto fail;
+
+ setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
- if (is_host_enabled(musb))
- setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+ davinci_musb_source_power(musb, 0, 1);
- musb->board_set_vbus = davinci_set_vbus;
- davinci_source_power(musb, 0, 1);
+ /* dm355 EVM swaps D+/D- for signal integrity, and
+ * is clocked from the main 24 MHz crystal.
+ */
+ if (machine_is_davinci_dm355_evm()) {
+ u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
+
+ phy_ctrl &= ~(3 << 9);
+ phy_ctrl |= USBPHY_DATAPOL;
+ __raw_writel(phy_ctrl, USB_PHY_CTRL);
+ }
+
+ /* On dm355, the default-A state machine needs DRVVBUS control.
+ * If we won't be a host, there's no need to turn it on.
+ */
+ if (cpu_is_davinci_dm355()) {
+ u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);
+
+ deepsleep &= ~DRVVBUS_FORCE;
+ __raw_writel(deepsleep, DM355_DEEPSLEEP);
+ }
/* reset the controller */
musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
@@ -401,23 +429,36 @@ int __init musb_platform_init(struct musb *musb)
/* NOTE: irqs are in mixed mode, not bypass to pure-musb */
pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
- revision, __raw_readl((void __force __iomem *)
- IO_ADDRESS(USBPHY_CTL_PADDR)),
+ revision, __raw_readl(USB_PHY_CTRL),
musb_readb(tibase, DAVINCI_USB_CTRL_REG));
- musb->isr = davinci_interrupt;
+ musb->isr = davinci_musb_interrupt;
return 0;
+
+fail:
+ usb_put_phy(musb->xceiv);
+unregister:
+ usb_phy_generic_unregister();
+ return ret;
}
-int musb_platform_exit(struct musb *musb)
+static int davinci_musb_exit(struct musb *musb)
{
- if (is_host_enabled(musb))
- del_timer_sync(&otg_workaround);
+ del_timer_sync(&otg_workaround);
+
+ /* force VBUS off */
+ if (cpu_is_davinci_dm355()) {
+ u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);
- davinci_source_power(musb, 0 /*off*/, 1);
+ deepsleep &= ~DRVVBUS_FORCE;
+ deepsleep |= DRVVBUS_OVERRIDE;
+ __raw_writel(deepsleep, DM355_DEEPSLEEP);
+ }
+
+ davinci_musb_source_power(musb, 0 /*off*/, 1);
/* delay, to avoid problems with module reload */
- if (is_host_enabled(musb) && musb->xceiv.default_a) {
+ if (musb->xceiv->otg->default_a) {
int maxdelay = 30;
u8 devctl, warn = 0;
@@ -430,7 +471,7 @@ int musb_platform_exit(struct musb *musb)
break;
if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
warn = devctl & MUSB_DEVCTL_VBUS;
- DBG(1, "VBUS %d\n",
+ dev_dbg(musb->controller, "VBUS %d\n",
warn >> MUSB_DEVCTL_VBUS_SHIFT);
}
msleep(1000);
@@ -439,12 +480,145 @@ int musb_platform_exit(struct musb *musb)
/* in OTG mode, another host might be connected */
if (devctl & MUSB_DEVCTL_VBUS)
- DBG(1, "VBUS off timeout (devctl %02x)\n", devctl);
+ dev_dbg(musb->controller, "VBUS off timeout (devctl %02x)\n", devctl);
}
phy_off();
- clk_disable(musb->clock);
+ usb_put_phy(musb->xceiv);
return 0;
}
+
+static const struct musb_platform_ops davinci_ops = {
+ .init = davinci_musb_init,
+ .exit = davinci_musb_exit,
+
+ .enable = davinci_musb_enable,
+ .disable = davinci_musb_disable,
+
+ .set_mode = davinci_musb_set_mode,
+
+ .set_vbus = davinci_musb_set_vbus,
+};
+
+static const struct platform_device_info davinci_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int davinci_probe(struct platform_device *pdev)
+{
+ struct resource musb_resources[3];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct platform_device *musb;
+ struct davinci_glue *glue;
+ struct platform_device_info pinfo;
+ struct clk *clk;
+
+ int ret = -ENOMEM;
+
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&pdev->dev, "failed to allocate glue context\n");
+ goto err0;
+ }
+
+ clk = clk_get(&pdev->dev, "usb");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err3;
+ }
+
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err4;
+ }
+
+ glue->dev = &pdev->dev;
+ glue->clk = clk;
+
+ pdata->platform_ops = &davinci_ops;
+
+ usb_phy_generic_register();
+ platform_set_drvdata(pdev, glue);
+
+ memset(musb_resources, 0x00, sizeof(*musb_resources) *
+ ARRAY_SIZE(musb_resources));
+
+ musb_resources[0].name = pdev->resource[0].name;
+ musb_resources[0].start = pdev->resource[0].start;
+ musb_resources[0].end = pdev->resource[0].end;
+ musb_resources[0].flags = pdev->resource[0].flags;
+
+ musb_resources[1].name = pdev->resource[1].name;
+ musb_resources[1].start = pdev->resource[1].start;
+ musb_resources[1].end = pdev->resource[1].end;
+ musb_resources[1].flags = pdev->resource[1].flags;
+
+ /*
+ * For DM6467 3 resources are passed. A placeholder for the 3rd
+ * resource is always there, so it's safe to always copy it...
+ */
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
+ pinfo = davinci_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = musb_resources;
+ pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+ goto err5;
+ }
+
+ return 0;
+
+err5:
+ clk_disable(clk);
+
+err4:
+ clk_put(clk);
+
+err3:
+ kfree(glue);
+
+err0:
+ return ret;
+}
+
+static int davinci_remove(struct platform_device *pdev)
+{
+ struct davinci_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister();
+ clk_disable(glue->clk);
+ clk_put(glue->clk);
+ kfree(glue);
+
+ return 0;
+}
+
+static struct platform_driver davinci_driver = {
+ .probe = davinci_probe,
+ .remove = davinci_remove,
+ .driver = {
+ .name = "musb-davinci",
+ },
+};
+
+MODULE_DESCRIPTION("DaVinci MUSB Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(davinci_driver);
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 7fb6238e270..371baa0ee50 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -15,14 +15,21 @@
*/
/* Integrated highspeed/otg PHY */
-#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
-#define USBPHY_PHYCLKGD (1 << 8)
-#define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */
-#define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */
-#define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */
-#define USBPHY_CLKO1SEL (1 << 3)
-#define USBPHY_OSCPDWN (1 << 2)
-#define USBPHY_PHYPDWN (1 << 0)
+#define USBPHY_CTL_PADDR 0x01c40034
+#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */
+#define USBPHY_PHYCLKGD BIT(8)
+#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */
+#define USBPHY_VBDTCTEN BIT(6) /* v(bus) comparator */
+#define USBPHY_VBUSSENS BIT(5) /* (dm355,ro) is vbus > 0.5V */
+#define USBPHY_PHYPLLON BIT(4) /* override pll suspend */
+#define USBPHY_CLKO1SEL BIT(3)
+#define USBPHY_OSCPDWN BIT(2)
+#define USBPHY_OTGPDWN BIT(1)
+#define USBPHY_PHYPDWN BIT(0)
+
+#define DM355_DEEPSLEEP_PADDR 0x01c40048
+#define DRVVBUS_FORCE BIT(2)
+#define DRVVBUS_OVERRIDE BIT(1)
/* For now include usb OTG module registers here */
#define DAVINCI_USB_VERSION_REG 0x00
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
new file mode 100644
index 00000000000..5f30537f192
--- /dev/null
+++ b/drivers/usb/musb/jz4740.c
@@ -0,0 +1,201 @@
+/*
+ * Ingenic JZ4740 "glue layer"
+ *
+ * Copyright (C) 2013, Apelete Seketeli <apelete@seketeli.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "musb_core.h"
+
+struct jz4740_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct clk *clk;
+};
+
+static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ /*
+	 * The controller is gadget only, so the state of the host mode IRQ
+	 * bits is undefined.  Mask them to make sure that the musb driver
+	 * core will never see them set.
+ */
+ musb->int_usb &= MUSB_INTR_SUSPEND | MUSB_INTR_RESUME |
+ MUSB_INTR_RESET | MUSB_INTR_SOF;
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
+};
+
+static struct musb_hdrc_config jz4740_musb_config = {
+ /* Silicon does not implement USB OTG. */
+ .multipoint = 0,
+ /* Max EPs scanned, driver will decide which EP can be used. */
+ .num_eps = 4,
+ /* RAMbits needed to configure EPs from table */
+ .ram_bits = 9,
+ .fifo_cfg = jz4740_musb_fifo_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
+};
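/*
 * Rough FIFO budget for the table above, assuming the core's usual sizing of
 * dynamic FIFO RAM as 1 << (ram_bits + 2) bytes (2 KiB for ram_bits = 9):
 * 64 bytes for EP0 plus 512 + 512 + 64 from jz4740_musb_fifo_cfg leaves
 * headroom within the 2048-byte budget.
 */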
+
+static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
+ .mode = MUSB_PERIPHERAL,
+ .config = &jz4740_musb_config,
+};
+
+static int jz4740_musb_init(struct musb *musb)
+{
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (!musb->xceiv) {
+ pr_err("HS UDC: no transceiver configured\n");
+ return -ENODEV;
+ }
+
+ /* Silicon does not implement ConfigData register.
+ * Set dyn_fifo to avoid reading EP config from hardware.
+ */
+ musb->dyn_fifo = true;
+
+ musb->isr = jz4740_musb_interrupt;
+
+ return 0;
+}
+
+static int jz4740_musb_exit(struct musb *musb)
+{
+ usb_put_phy(musb->xceiv);
+
+ return 0;
+}
+
+static const struct musb_platform_ops jz4740_musb_ops = {
+ .init = jz4740_musb_init,
+ .exit = jz4740_musb_exit,
+};
+
+static int jz4740_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata = &jz4740_musb_platform_data;
+ struct platform_device *musb;
+ struct jz4740_glue *glue;
+ struct clk *clk;
+ int ret;
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb) {
+ dev_err(&pdev->dev, "failed to allocate musb device\n");
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_get(&pdev->dev, "udc");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err_platform_device_put;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err_platform_device_put;
+ }
+
+ musb->dev.parent = &pdev->dev;
+
+ glue->dev = &pdev->dev;
+ glue->musb = musb;
+ glue->clk = clk;
+
+ pdata->platform_ops = &jz4740_musb_ops;
+
+ platform_set_drvdata(pdev, glue);
+
+ ret = platform_device_add_resources(musb, pdev->resource,
+ pdev->num_resources);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add resources\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add platform_data\n");
+ goto err_clk_disable;
+ }
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device\n");
+ goto err_clk_disable;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(clk);
+err_platform_device_put:
+ platform_device_put(musb);
+ return ret;
+}
+
+static int jz4740_remove(struct platform_device *pdev)
+{
+ struct jz4740_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ clk_disable_unprepare(glue->clk);
+
+ return 0;
+}
+
+static struct platform_driver jz4740_driver = {
+ .probe = jz4740_probe,
+ .remove = jz4740_remove,
+ .driver = {
+ .name = "musb-jz4740",
+ },
+};
+
+MODULE_DESCRIPTION("JZ4740 MUSB Glue Layer");
+MODULE_AUTHOR("Apelete Seketeli <apelete@seketeli.net>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(jz4740_driver);
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
new file mode 100644
index 00000000000..1e58ed2361c
--- /dev/null
+++ b/drivers/usb/musb/musb_am335x.c
@@ -0,0 +1,43 @@
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+static int am335x_child_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static const struct of_device_id am335x_child_of_match[] = {
+ { .compatible = "ti,am33xx-usb" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, am335x_child_of_match);
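/*
 * This driver binds to the AM33xx USB parent node (compatible "ti,am33xx-usb")
 * and simply turns its child nodes (typically the control module, the PHYs,
 * the MUSB instances and the CPPI 4.1 DMA engine) into platform devices that
 * are then probed by their own drivers.
 */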
+
+static struct platform_driver am335x_child_driver = {
+ .probe = am335x_child_probe,
+ .driver = {
+ .name = "am335x-usb-childs",
+ .of_match_table = am335x_child_of_match,
+ },
+};
+
+static int __init am335x_child_init(void)
+{
+ return platform_driver_register(&am335x_child_driver);
+}
+module_init(am335x_child_init);
+
+MODULE_DESCRIPTION("AM33xx child devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index af77e465900..eff3c5cf84f 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -83,7 +83,7 @@
* This gets many kinds of configuration information:
* - Kconfig for everything user-configurable
* - platform_device for addressing, irq, and platform_data
- * - platform_data is mostly for board-specific informarion
+ * - platform_data is mostly for board-specific information
 * (plus recently, SOC or family details)
*
* Most of the conditional compilation will (someday) vanish.
@@ -93,31 +93,18 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/kobject.h>
+#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-
-#ifdef CONFIG_ARM
-#include <mach/hardware.h>
-#include <mach/memory.h>
-#include <asm/mach-types.h>
-#endif
+#include <linux/dma-mapping.h>
#include "musb_core.h"
-
-#ifdef CONFIG_ARCH_DAVINCI
-#include "davinci.h"
-#endif
-
+#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
-unsigned musb_debug;
-module_param_named(debug, musb_debug, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug message level. Default = 0");
-
#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
@@ -125,7 +112,7 @@ MODULE_PARM_DESC(debug, "Debug message level. Default = 0");
#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
-#define MUSB_DRIVER_NAME "musb_hdrc"
+#define MUSB_DRIVER_NAME "musb-hdrc"
const char musb_driver_name[] = MUSB_DRIVER_NAME;
MODULE_DESCRIPTION(DRIVER_INFO);
@@ -138,28 +125,121 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
static inline struct musb *dev_to_musb(struct device *dev)
{
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- /* usbcore insists dev->driver_data is a "struct hcd *" */
- return hcd_to_musb(dev_get_drvdata(dev));
-#else
return dev_get_drvdata(dev);
-#endif
}
/*-------------------------------------------------------------------------*/
-#if !defined(CONFIG_USB_TUSB6010) && !defined(CONFIG_BLACKFIN)
+#ifndef CONFIG_BLACKFIN
+static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
+{
+ void __iomem *addr = phy->io_priv;
+ int i = 0;
+ u8 r;
+ u8 power;
+ int ret;
+
+ pm_runtime_get_sync(phy->io_dev);
+
+ /* Make sure the transceiver is not in low power mode */
+ power = musb_readb(addr, MUSB_POWER);
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(addr, MUSB_POWER, power);
+
+ /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
+ * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
+ */
+
+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
+ MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
+
+ while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+ & MUSB_ULPI_REG_CMPLT)) {
+ i++;
+ if (i == 10000) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ }

+ r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
+ r &= ~MUSB_ULPI_REG_CMPLT;
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
+
+ ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
+
+out:
+ pm_runtime_put(phy->io_dev);
+
+ return ret;
+}
+
+static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
+{
+ void __iomem *addr = phy->io_priv;
+ int i = 0;
+ u8 r = 0;
+ u8 power;
+ int ret = 0;
+
+ pm_runtime_get_sync(phy->io_dev);
+
+ /* Make sure the transceiver is not in low power mode */
+ power = musb_readb(addr, MUSB_POWER);
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(addr, MUSB_POWER, power);
+
+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+ musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
+
+ while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+ & MUSB_ULPI_REG_CMPLT)) {
+ i++;
+ if (i == 10000) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ }
+
+ r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
+ r &= ~MUSB_ULPI_REG_CMPLT;
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
+
+out:
+ pm_runtime_put(phy->io_dev);
+
+ return ret;
+}
+#else
+#define musb_ulpi_read NULL
+#define musb_ulpi_write NULL
+#endif
+
+static struct usb_phy_io_ops musb_ulpi_access = {
+ .read = musb_ulpi_read,
+ .write = musb_ulpi_write,
+};
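These ops get wired into musb->xceiv->io_ops later in musb_init_controller() (see further down in this patch), so callers reach ULPI registers through the generic PHY accessors instead of poking the MUSB_ULPI_* registers directly. A minimal usage sketch; the 0x00 offset is the ULPI-standard Vendor ID Low register, and the surrounding code is illustrative:

	/* read a ULPI register through the phy's registered io_ops */
	int vid_low = usb_phy_io_read(musb->xceiv, 0x00);

	if (vid_low < 0)
		dev_warn(musb->controller, "ULPI read failed: %d\n", vid_low);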
+
+/*-------------------------------------------------------------------------*/
+
+#if !defined(CONFIG_USB_MUSB_TUSB6010) && !defined(CONFIG_USB_MUSB_BLACKFIN)
/*
* Load an endpoint's FIFO
*/
void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{
+ struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
+ if (unlikely(len == 0))
+ return;
+
prefetch((u8 *)src);
- DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
'T', hw_ep->epnum, fifo, len, src);
/* we can't assume unaligned reads work */
@@ -169,7 +249,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
/* best case is 32bit-aligned source address */
if ((0x02 & (unsigned long) src) == 0) {
if (len >= 4) {
- writesl(fifo, src + index, len >> 2);
+ iowrite32_rep(fifo, src + index, len >> 2);
index += len & ~0x03;
}
if (len & 0x02) {
@@ -178,7 +258,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
}
} else {
if (len >= 2) {
- writesw(fifo, src + index, len >> 1);
+ iowrite16_rep(fifo, src + index, len >> 1);
index += len & ~0x01;
}
}
@@ -186,18 +266,23 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
musb_writeb(fifo, 0, src[index]);
} else {
/* byte aligned */
- writesb(fifo, src, len);
+ iowrite8_rep(fifo, src, len);
}
}
+#if !defined(CONFIG_USB_MUSB_AM35X)
/*
* Unload an endpoint's FIFO
*/
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
+ struct musb *musb = hw_ep->musb;
void __iomem *fifo = hw_ep->fifo;
- DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ if (unlikely(len == 0))
+ return;
+
+ dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
'R', hw_ep->epnum, fifo, len, dst);
/* we can't assume unaligned writes work */
@@ -207,7 +292,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
/* best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) dst) == 0) {
if (len >= 4) {
- readsl(fifo, dst, len >> 2);
+ ioread32_rep(fifo, dst, len >> 2);
index = len & ~0x03;
}
if (len & 0x02) {
@@ -216,7 +301,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
}
} else {
if (len >= 2) {
- readsw(fifo, dst, len >> 1);
+ ioread16_rep(fifo, dst, len >> 1);
index = len & ~0x01;
}
}
@@ -224,9 +309,10 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
dst[index] = musb_readb(fifo, 0);
} else {
/* byte aligned */
- readsb(fifo, dst, len);
+ ioread8_rep(fifo, dst, len);
}
}
+#endif
#endif /* normal PIO */
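Both PIO helpers above pick the widest MMIO burst the buffer alignment allows, then mop up the remaining 16-bit and 8-bit tails. A distilled sketch of that dispatch on the write side (tail handling omitted; for illustration only, not the driver's exact code):

/* write 'len' bytes to a FIFO register, using the widest aligned bursts */
static void fifo_pio_write_sketch(void __iomem *fifo, const u8 *src, u16 len)
{
	if (((unsigned long)src & 0x03) == 0 && (len & 0x03) == 0)
		iowrite32_rep(fifo, src, len >> 2);	/* 32-bit bursts */
	else if (((unsigned long)src & 0x01) == 0 && (len & 0x01) == 0)
		iowrite16_rep(fifo, src, len >> 1);	/* 16-bit bursts */
	else
		iowrite8_rep(fifo, src, len);		/* byte at a time */
}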
@@ -265,86 +351,59 @@ void musb_load_testpacket(struct musb *musb)
/*-------------------------------------------------------------------------*/
-const char *otg_state_string(struct musb *musb)
-{
- switch (musb->xceiv.state) {
- case OTG_STATE_A_IDLE: return "a_idle";
- case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise";
- case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon";
- case OTG_STATE_A_HOST: return "a_host";
- case OTG_STATE_A_SUSPEND: return "a_suspend";
- case OTG_STATE_A_PERIPHERAL: return "a_peripheral";
- case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall";
- case OTG_STATE_A_VBUS_ERR: return "a_vbus_err";
- case OTG_STATE_B_IDLE: return "b_idle";
- case OTG_STATE_B_SRP_INIT: return "b_srp_init";
- case OTG_STATE_B_PERIPHERAL: return "b_peripheral";
- case OTG_STATE_B_WAIT_ACON: return "b_wait_acon";
- case OTG_STATE_B_HOST: return "b_host";
- default: return "UNDEFINED";
- }
-}
-
-#ifdef CONFIG_USB_MUSB_OTG
-
-/*
- * See also USB_OTG_1-3.pdf 6.6.5 Timers
- * REVISIT: Are the other timers done in the hardware?
- */
-#define TB_ASE0_BRST 100 /* Min 3.125 ms */
-
/*
* Handles OTG hnp timeouts, such as b_ase0_brst
*/
-void musb_otg_timer_func(unsigned long data)
+static void musb_otg_timer_func(unsigned long data)
{
struct musb *musb = (struct musb *)data;
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_WAIT_ACON:
- DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
+ dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n");
musb_g_disconnect(musb);
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb->is_active = 0;
break;
+ case OTG_STATE_A_SUSPEND:
case OTG_STATE_A_WAIT_BCON:
- DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
- musb_hnp_stop(musb);
+ dev_dbg(musb->controller, "HNP: %s timeout\n",
+ usb_otg_state_string(musb->xceiv->state));
+ musb_platform_set_vbus(musb, 0);
+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
break;
default:
- DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
+ dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
+ usb_otg_state_string(musb->xceiv->state));
}
- musb->ignore_disconnect = 0;
spin_unlock_irqrestore(&musb->lock, flags);
}
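With this change the b_ase0_brst and a_wait_bcon timeouts run on a per-controller timer, musb->otg_timer, instead of the file-scope DEFINE_TIMER deleted just below. The lifecycle, pieced together from other hunks in this same patch:

	/* at init (see musb_init_controller() later in this patch) */
	setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long)musb);

	/* armed when a timeout is expected, e.g. waiting for b_ase0_brst */
	mod_timer(&musb->otg_timer,
		  jiffies + msecs_to_jiffies(OTG_TIME_B_ASE0_BRST));

	/* cancelled once the awaited event (here, CONNECT) arrives */
	del_timer(&musb->otg_timer);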
-static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
-
/*
- * Stops the B-device HNP state. Caller must take care of locking.
+ * Stops the HNP transition. Caller must take care of locking.
*/
void musb_hnp_stop(struct musb *musb)
{
- struct usb_hcd *hcd = musb_to_hcd(musb);
+ struct usb_hcd *hcd = musb->hcd;
void __iomem *mbase = musb->mregs;
u8 reg;
- switch (musb->xceiv.state) {
+ dev_dbg(musb->controller, "HNP: stop from %s\n",
+ usb_otg_state_string(musb->xceiv->state));
+
+ switch (musb->xceiv->state) {
case OTG_STATE_A_PERIPHERAL:
- case OTG_STATE_A_WAIT_VFALL:
- case OTG_STATE_A_WAIT_BCON:
- DBG(1, "HNP: Switching back to A-host\n");
musb_g_disconnect(musb);
- musb->xceiv.state = OTG_STATE_A_IDLE;
- MUSB_HST_MODE(musb);
- musb->is_active = 0;
+ dev_dbg(musb->controller, "HNP: back to %s\n",
+ usb_otg_state_string(musb->xceiv->state));
break;
case OTG_STATE_B_HOST:
- DBG(1, "HNP: Disabling HR\n");
- hcd->self.is_b_host = 0;
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ dev_dbg(musb->controller, "HNP: Disabling HR\n");
+ if (hcd)
+ hcd->self.is_b_host = 0;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
MUSB_DEV_MODE(musb);
reg = musb_readb(mbase, MUSB_POWER);
reg |= MUSB_POWER_SUSPENDM;
@@ -352,8 +411,8 @@ void musb_hnp_stop(struct musb *musb)
/* REVISIT: Start SESSION_REQUEST here? */
break;
default:
- DBG(1, "HNP: Stopping in unknown state %s\n",
- otg_state_string(musb));
+ dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n",
+ usb_otg_state_string(musb->xceiv->state));
}
/*
@@ -361,12 +420,9 @@ void musb_hnp_stop(struct musb *musb)
* which cause occasional OPT A "Did not receive reset after connect"
* errors.
*/
- musb->port1_status &=
- ~(1 << USB_PORT_FEAT_C_CONNECTION);
+ musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}
-#endif
-
/*
* Interrupt Service Routine to record USB "global" interrupts.
* Since these do not happen often and signify things of
@@ -379,17 +435,12 @@ void musb_hnp_stop(struct musb *musb)
* @param power
*/
-#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \
- | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \
- | MUSB_INTR_RESET)
-
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
- u8 devctl, u8 power)
+ u8 devctl)
{
irqreturn_t handled = IRQ_NONE;
- void __iomem *mbase = musb->mregs;
- DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
+ dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
int_usb);
/* in host mode, the peripheral may issue remote wakeup.
@@ -398,20 +449,23 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
*/
if (int_usb & MUSB_INTR_RESUME) {
handled = IRQ_HANDLED;
- DBG(3, "RESUME (%s)\n", otg_state_string(musb));
+ dev_dbg(musb->controller, "RESUME (%s)\n", usb_otg_state_string(musb->xceiv->state));
if (devctl & MUSB_DEVCTL_HM) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- switch (musb->xceiv.state) {
+ void __iomem *mbase = musb->mregs;
+ u8 power;
+
+ switch (musb->xceiv->state) {
case OTG_STATE_A_SUSPEND:
/* remote wakeup? later, GetPortStatus
* will stop RESUME signaling
*/
+ power = musb_readb(musb->mregs, MUSB_POWER);
if (power & MUSB_POWER_SUSPENDM) {
/* spurious */
musb->int_usb &= ~MUSB_INTR_SUSPEND;
- DBG(2, "Spurious SUSPENDM\n");
+ dev_dbg(musb->controller, "Spurious SUSPENDM\n");
break;
}
@@ -423,33 +477,32 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
musb->rh_timer = jiffies
- + msecs_to_jiffies(20);
+ + msecs_to_jiffies(20);
+ schedule_delayed_work(
+ &musb->finish_resume_work,
+ msecs_to_jiffies(20));
- musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->xceiv->state = OTG_STATE_A_HOST;
musb->is_active = 1;
- usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ musb_host_resume_root_hub(musb);
break;
case OTG_STATE_B_WAIT_ACON:
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb->is_active = 1;
MUSB_DEV_MODE(musb);
break;
default:
WARNING("bogus %s RESUME (%s)\n",
"host",
- otg_state_string(musb));
+ usb_otg_state_string(musb->xceiv->state));
}
-#endif
} else {
- switch (musb->xceiv.state) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ switch (musb->xceiv->state) {
case OTG_STATE_A_SUSPEND:
/* possibly DISCONNECT is upcoming */
- musb->xceiv.state = OTG_STATE_A_HOST;
- usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ musb->xceiv->state = OTG_STATE_A_HOST;
+ musb_host_resume_root_hub(musb);
break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
case OTG_STATE_B_WAIT_ACON:
case OTG_STATE_B_PERIPHERAL:
/* disconnect while suspended? we may
@@ -467,19 +520,26 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
case OTG_STATE_B_IDLE:
musb->int_usb &= ~MUSB_INTR_SUSPEND;
break;
-#endif
default:
WARNING("bogus %s RESUME (%s)\n",
"peripheral",
- otg_state_string(musb));
+ usb_otg_state_string(musb->xceiv->state));
}
}
}
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* see manual for the order of the tests */
if (int_usb & MUSB_INTR_SESSREQ) {
- DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
+ void __iomem *mbase = musb->mregs;
+
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
+ && (devctl & MUSB_DEVCTL_BDEVICE)) {
+ dev_dbg(musb->controller, "SessReq while on B state\n");
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n",
+ usb_otg_state_string(musb->xceiv->state));
/* IRQ arrives from ID pin sense or (later, if VBUS power
* is removed) SRP. responses are time critical:
@@ -490,9 +550,9 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
*/
musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
musb->ep0_stage = MUSB_EP0_START;
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
- musb_set_vbus(musb, 1);
+ musb_platform_set_vbus(musb, 1);
handled = IRQ_HANDLED;
}
@@ -516,7 +576,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
* REVISIT: do delays from lots of DEBUG_KERNEL checks
* make trouble here, keeping VBUS < 4.4V ?
*/
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_HOST:
/* recovery is dicey once we've gotten past the
* initial stages of enumeration, but if VBUS
@@ -527,22 +587,25 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
case OTG_STATE_A_WAIT_BCON:
case OTG_STATE_A_WAIT_VRISE:
if (musb->vbuserr_retry) {
+ void __iomem *mbase = musb->mregs;
+
musb->vbuserr_retry--;
ignore = 1;
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(mbase, MUSB_DEVCTL, devctl);
} else {
musb->port1_status |=
- (1 << USB_PORT_FEAT_OVER_CURRENT)
- | (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ USB_PORT_STAT_OVERCURRENT
+ | (USB_PORT_STAT_C_OVERCURRENT << 16);
}
break;
default:
break;
}
- DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
- otg_state_string(musb),
+ dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
+ "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
+ usb_otg_state_string(musb->xceiv->state),
devctl,
({ char *s;
switch (devctl & MUSB_DEVCTL_VBUS) {
@@ -555,34 +618,89 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
default:
s = "VALID"; break;
- }; s; }),
+ } s; }),
VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
musb->port1_status);
/* go through A_WAIT_VFALL then start a new session */
if (!ignore)
- musb_set_vbus(musb, 0);
+ musb_platform_set_vbus(musb, 0);
handled = IRQ_HANDLED;
}
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->state), devctl);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_PERIPHERAL:
+ /* We also come here if the cable is removed, since
+ * this silicon doesn't report ID-no-longer-grounded.
+ *
+ * We depend on T(a_wait_bcon) to shut us down, and
+ * hope users don't do anything dicey during this
+ * undesired detour through A_WAIT_BCON.
+ */
+ musb_hnp_stop(musb);
+ musb_host_resume_root_hub(musb);
+ musb_root_disconnect(musb);
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon
+ ? : OTG_TIME_A_WAIT_BCON));
+
+ break;
+ case OTG_STATE_B_IDLE:
+ if (!musb->is_active)
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_suspend(musb);
+ musb->is_active = musb->g.b_hnp_enable;
+ if (musb->is_active) {
+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+ dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(
+ OTG_TIME_B_ASE0_BRST));
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_A_HOST:
+ musb->xceiv->state = OTG_STATE_A_SUSPEND;
+ musb->is_active = musb->hcd->self.b_hnp_enable;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+ dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n");
+ break;
+ default:
+ /* "should not happen" */
+ musb->is_active = 0;
+ break;
+ }
+ }
+
if (int_usb & MUSB_INTR_CONNECT) {
- struct usb_hcd *hcd = musb_to_hcd(musb);
+ struct usb_hcd *hcd = musb->hcd;
handled = IRQ_HANDLED;
musb->is_active = 1;
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
musb->ep0_stage = MUSB_EP0_START;
-#ifdef CONFIG_USB_MUSB_OTG
/* flush endpoints when transitioning from Device Mode */
if (is_peripheral_active(musb)) {
/* REVISIT HNP; just force disconnect */
}
- musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
- musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
- musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
-#endif
+ musb->intrtxe = musb->epmask;
+ musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
+ musb->intrrxe = musb->epmask & 0xfffe;
+ musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
+ musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
|USB_PORT_STAT_HIGH_SPEED
|USB_PORT_STAT_ENABLE
@@ -594,47 +712,91 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
if (devctl & MUSB_DEVCTL_LSDEV)
musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
- if (hcd->status_urb)
- usb_hcd_poll_rh_status(hcd);
- else
- usb_hcd_resume_root_hub(hcd);
-
- MUSB_HST_MODE(musb);
-
/* indicate new connection to OTG machine */
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_PERIPHERAL:
if (int_usb & MUSB_INTR_SUSPEND) {
- DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
- musb->xceiv.state = OTG_STATE_B_HOST;
- hcd->self.is_b_host = 1;
+ dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
int_usb &= ~MUSB_INTR_SUSPEND;
+ goto b_host;
} else
- DBG(1, "CONNECT as b_peripheral???\n");
+ dev_dbg(musb->controller, "CONNECT as b_peripheral???\n");
break;
case OTG_STATE_B_WAIT_ACON:
- DBG(1, "HNP: Waiting to switch to b_host state\n");
- musb->xceiv.state = OTG_STATE_B_HOST;
- hcd->self.is_b_host = 1;
+ dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
+b_host:
+ musb->xceiv->state = OTG_STATE_B_HOST;
+ if (musb->hcd)
+ musb->hcd->self.is_b_host = 1;
+ del_timer(&musb->otg_timer);
break;
default:
if ((devctl & MUSB_DEVCTL_VBUS)
== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
- musb->xceiv.state = OTG_STATE_A_HOST;
- hcd->self.is_b_host = 0;
+ musb->xceiv->state = OTG_STATE_A_HOST;
+ if (hcd)
+ hcd->self.is_b_host = 0;
}
break;
}
- DBG(1, "CONNECT (%s) devctl %02x\n",
- otg_state_string(musb), devctl);
+
+ musb_host_poke_root_hub(musb);
+
+ dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->state), devctl);
+ }
+
+ if (int_usb & MUSB_INTR_DISCONNECT) {
+ dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->state),
+ MUSB_MODE(musb), devctl);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ musb_host_resume_root_hub(musb);
+ musb_root_disconnect(musb);
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_B_HOST:
+ /* REVISIT this behaves for "real disconnect"
+ * cases; make sure the other transitions from
+ * B_HOST act right too. The B_HOST code
+ * in hnp_stop() is currently not used...
+ */
+ musb_root_disconnect(musb);
+ if (musb->hcd)
+ musb->hcd->self.is_b_host = 0;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ MUSB_DEV_MODE(musb);
+ musb_g_disconnect(musb);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ musb_root_disconnect(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_B_WAIT_ACON:
+ /* FALLTHROUGH */
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb_g_disconnect(musb);
+ break;
+ default:
+ WARNING("unhandled DISCONNECT transition (%s)\n",
+ usb_otg_state_string(musb->xceiv->state));
+ break;
+ }
}
-#endif /* CONFIG_USB_MUSB_HDRC_HCD */
/* mentor saves a bit: bus reset and babble share the same irq.
* only host sees babble; only peripheral sees bus reset.
*/
if (int_usb & MUSB_INTR_RESET) {
- if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
+ handled = IRQ_HANDLED;
+ if ((devctl & MUSB_DEVCTL_HM) != 0) {
/*
* Looks like non-HS BABBLE can be ignored, but
* HS BABBLE is an error condition. For HS the solution
@@ -643,74 +805,52 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
* stop the session.
*/
if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
- DBG(1, "BABBLE devctl: %02x\n", devctl);
+ dev_dbg(musb->controller, "BABBLE devctl: %02x\n", devctl);
else {
ERR("Stopping host session -- babble\n");
- musb_writeb(mbase, MUSB_DEVCTL, 0);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
}
- } else if (is_peripheral_capable()) {
- DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
- switch (musb->xceiv.state) {
-#ifdef CONFIG_USB_OTG
+ } else {
+ dev_dbg(musb->controller, "BUS RESET as %s\n",
+ usb_otg_state_string(musb->xceiv->state));
+ switch (musb->xceiv->state) {
case OTG_STATE_A_SUSPEND:
- /* We need to ignore disconnect on suspend
- * otherwise tusb 2.0 won't reconnect after a
- * power cycle, which breaks otg compliance.
- */
- musb->ignore_disconnect = 1;
musb_g_reset(musb);
/* FALLTHROUGH */
case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
- DBG(1, "HNP: Setting timer as %s\n",
- otg_state_string(musb));
- musb_otg_timer.data = (unsigned long)musb;
- mod_timer(&musb_otg_timer, jiffies
- + msecs_to_jiffies(100));
+ /* never use invalid T(a_wait_bcon) */
+ dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n",
+ usb_otg_state_string(musb->xceiv->state),
+ TA_WAIT_BCON(musb));
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(TA_WAIT_BCON(musb)));
break;
case OTG_STATE_A_PERIPHERAL:
- musb_hnp_stop(musb);
+ del_timer(&musb->otg_timer);
+ musb_g_reset(musb);
break;
case OTG_STATE_B_WAIT_ACON:
- DBG(1, "HNP: RESET (%s), to b_peripheral\n",
- otg_state_string(musb));
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n",
+ usb_otg_state_string(musb->xceiv->state));
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb_g_reset(musb);
break;
-#endif
case OTG_STATE_B_IDLE:
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
/* FALLTHROUGH */
case OTG_STATE_B_PERIPHERAL:
musb_g_reset(musb);
break;
default:
- DBG(1, "Unhandled BUS RESET as %s\n",
- otg_state_string(musb));
+ dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n",
+ usb_otg_state_string(musb->xceiv->state));
}
}
-
- handled = IRQ_HANDLED;
}
- schedule_work(&musb->irq_work);
- return handled;
-}
-
-/*
- * Interrupt Service Routine to record USB "global" interrupts.
- * Since these do not happen often and signify things of
- * paramount importance, it seems OK to check them individually;
- * the order of the tests is specified in the manual
- *
- * @param musb instance pointer
- * @param int_usb register contents
- * @param devctl
- * @param power
- */
-static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
- u8 devctl, u8 power)
-{
- irqreturn_t handled = IRQ_NONE;
+ /* handle babble condition */
+ if (int_usb & MUSB_INTR_BABBLE && is_host_active(musb))
+ schedule_work(&musb->recover_work);
#if 0
/* REVISIT ... this would be for multiplexing periodic endpoints, or
@@ -730,7 +870,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
u8 epnum;
u16 frame;
- DBG(6, "START_OF_FRAME\n");
+ dev_dbg(musb->controller, "START_OF_FRAME\n");
handled = IRQ_HANDLED;
/* start any periodic Tx transfers waiting for current frame */
@@ -757,178 +897,81 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
}
#endif
- if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
- DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
- otg_state_string(musb),
- MUSB_MODE(musb), devctl);
- handled = IRQ_HANDLED;
+ schedule_work(&musb->irq_work);
- switch (musb->xceiv.state) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- case OTG_STATE_A_HOST:
- case OTG_STATE_A_SUSPEND:
- usb_hcd_resume_root_hub(musb_to_hcd(musb));
- musb_root_disconnect(musb);
- if (musb->a_wait_bcon != 0)
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon));
- break;
-#endif /* HOST */
-#ifdef CONFIG_USB_MUSB_OTG
- case OTG_STATE_B_HOST:
- musb_hnp_stop(musb);
- break;
- case OTG_STATE_A_PERIPHERAL:
- musb_hnp_stop(musb);
- musb_root_disconnect(musb);
- /* FALLTHROUGH */
- case OTG_STATE_B_WAIT_ACON:
- /* FALLTHROUGH */
-#endif /* OTG */
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- case OTG_STATE_B_PERIPHERAL:
- case OTG_STATE_B_IDLE:
- musb_g_disconnect(musb);
- break;
-#endif /* GADGET */
- default:
- WARNING("unhandled DISCONNECT transition (%s)\n",
- otg_state_string(musb));
- break;
- }
+ return handled;
+}
- schedule_work(&musb->irq_work);
- }
+/*-------------------------------------------------------------------------*/
- if (int_usb & MUSB_INTR_SUSPEND) {
- DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
- otg_state_string(musb), devctl, power);
- handled = IRQ_HANDLED;
+static void musb_generic_disable(struct musb *musb)
+{
+ void __iomem *mbase = musb->mregs;
+ u16 temp;
- switch (musb->xceiv.state) {
-#ifdef CONFIG_USB_MUSB_OTG
- case OTG_STATE_A_PERIPHERAL:
- /*
- * We cannot stop HNP here, devctl BDEVICE might be
- * still set.
- */
- break;
-#endif
- case OTG_STATE_B_PERIPHERAL:
- musb_g_suspend(musb);
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv.gadget->b_hnp_enable;
- if (musb->is_active) {
-#ifdef CONFIG_USB_MUSB_OTG
- musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
- DBG(1, "HNP: Setting timer for b_ase0_brst\n");
- musb_otg_timer.data = (unsigned long)musb;
- mod_timer(&musb_otg_timer, jiffies
- + msecs_to_jiffies(TB_ASE0_BRST));
-#endif
- }
- break;
- case OTG_STATE_A_WAIT_BCON:
- if (musb->a_wait_bcon != 0)
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon));
- break;
- case OTG_STATE_A_HOST:
- musb->xceiv.state = OTG_STATE_A_SUSPEND;
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv.host->b_hnp_enable;
- break;
- case OTG_STATE_B_HOST:
- /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
- DBG(1, "REVISIT: SUSPEND as B_HOST\n");
- break;
- default:
- /* "should not happen" */
- musb->is_active = 0;
- break;
- }
- schedule_work(&musb->irq_work);
- }
+ /* disable interrupts */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0);
+ musb->intrtxe = 0;
+ musb_writew(mbase, MUSB_INTRTXE, 0);
+ musb->intrrxe = 0;
+ musb_writew(mbase, MUSB_INTRRXE, 0);
+ /* off */
+ musb_writeb(mbase, MUSB_DEVCTL, 0);
- return handled;
-}
+ /* flush pending interrupts */
+ temp = musb_readb(mbase, MUSB_INTRUSB);
+ temp = musb_readw(mbase, MUSB_INTRTX);
+ temp = musb_readw(mbase, MUSB_INTRRX);
-/*-------------------------------------------------------------------------*/
+}
/*
-* Program the HDRC to start (enable interrupts, dma, etc.).
-*/
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
void musb_start(struct musb *musb)
{
- void __iomem *regs = musb->mregs;
- u8 devctl = musb_readb(regs, MUSB_DEVCTL);
+ void __iomem *regs = musb->mregs;
+ u8 devctl = musb_readb(regs, MUSB_DEVCTL);
- DBG(2, "<== devctl %02x\n", devctl);
+ dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
/* Set INT enable registers, enable interrupts */
- musb_writew(regs, MUSB_INTRTXE, musb->epmask);
- musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe);
+ musb->intrtxe = musb->epmask;
+ musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
+ musb->intrrxe = musb->epmask & 0xfffe;
+ musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
musb_writeb(regs, MUSB_TESTMODE, 0);
/* put into basic highspeed mode and start session */
musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
- | MUSB_POWER_SOFTCONN
- | MUSB_POWER_HSENAB
- /* ENSUSPEND wedges tusb */
- /* | MUSB_POWER_ENSUSPEND */
- );
+ | MUSB_POWER_HSENAB
+ /* ENSUSPEND wedges tusb */
+ /* | MUSB_POWER_ENSUSPEND */
+ );
musb->is_active = 0;
devctl = musb_readb(regs, MUSB_DEVCTL);
devctl &= ~MUSB_DEVCTL_SESSION;
- if (is_otg_enabled(musb)) {
- /* session started after:
- * (a) ID-grounded irq, host mode;
- * (b) vbus present/connect IRQ, peripheral mode;
- * (c) peripheral initiates, using SRP
- */
- if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
- musb->is_active = 1;
- else
- devctl |= MUSB_DEVCTL_SESSION;
-
- } else if (is_host_enabled(musb)) {
- /* assume ID pin is hard-wired to ground */
+ /* session started after:
+ * (a) ID-grounded irq, host mode;
+ * (b) vbus present/connect IRQ, peripheral mode;
+ * (c) peripheral initiates, using SRP
+ */
+ if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+ (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
+ musb->is_active = 1;
+ } else {
devctl |= MUSB_DEVCTL_SESSION;
-
- } else /* peripheral is enabled */ {
- if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
- musb->is_active = 1;
}
+
musb_platform_enable(musb);
musb_writeb(regs, MUSB_DEVCTL, devctl);
}
-
-static void musb_generic_disable(struct musb *musb)
-{
- void __iomem *mbase = musb->mregs;
- u16 temp;
-
- /* disable interrupts */
- musb_writeb(mbase, MUSB_INTRUSBE, 0);
- musb_writew(mbase, MUSB_INTRTXE, 0);
- musb_writew(mbase, MUSB_INTRRXE, 0);
-
- /* off */
- musb_writeb(mbase, MUSB_DEVCTL, 0);
-
- /* flush pending interrupts */
- temp = musb_readb(mbase, MUSB_INTRUSB);
- temp = musb_readw(mbase, MUSB_INTRTX);
- temp = musb_readw(mbase, MUSB_INTRRX);
-
-}
-
/*
* Make the HDRC stop (disable interrupts, etc.);
* reversible by musb_start
@@ -941,7 +984,7 @@ void musb_stop(struct musb *musb)
/* stop IRQs, timers, ... */
musb_platform_disable(musb);
musb_generic_disable(musb);
- DBG(3, "HDRC disabled\n");
+ dev_dbg(musb->controller, "HDRC disabled\n");
/* FIXME
* - mark host and/or peripheral drivers unusable/inactive
@@ -958,15 +1001,20 @@ static void musb_shutdown(struct platform_device *pdev)
struct musb *musb = dev_to_musb(&pdev->dev);
unsigned long flags;
+ pm_runtime_get_sync(musb->controller);
+
+ musb_host_cleanup(musb);
+ musb_gadget_cleanup(musb);
+
spin_lock_irqsave(&musb->lock, flags);
musb_platform_disable(musb);
musb_generic_disable(musb);
- if (musb->clock) {
- clk_put(musb->clock);
- musb->clock = NULL;
- }
spin_unlock_irqrestore(&musb->lock, flags);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_platform_exit(musb);
+
+ pm_runtime_put(musb->controller);
/* FIXME power down */
}
@@ -983,35 +1031,33 @@ static void musb_shutdown(struct platform_device *pdev)
* We don't currently use dynamic fifo setup capability to do anything
* more than selecting one of a bunch of predefined configurations.
*/
-#if defined(CONFIG_USB_TUSB6010) || \
- defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
-static ushort __initdata fifo_mode = 4;
+#if defined(CONFIG_USB_MUSB_TUSB6010) \
+ || defined(CONFIG_USB_MUSB_TUSB6010_MODULE) \
+ || defined(CONFIG_USB_MUSB_OMAP2PLUS) \
+ || defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE) \
+ || defined(CONFIG_USB_MUSB_AM35X) \
+ || defined(CONFIG_USB_MUSB_AM35X_MODULE) \
+ || defined(CONFIG_USB_MUSB_DSPS) \
+ || defined(CONFIG_USB_MUSB_DSPS_MODULE)
+static ushort fifo_mode = 4;
+#elif defined(CONFIG_USB_MUSB_UX500) \
+ || defined(CONFIG_USB_MUSB_UX500_MODULE)
+static ushort fifo_mode = 5;
#else
-static ushort __initdata fifo_mode = 2;
+static ushort fifo_mode = 2;
#endif
/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
-
-enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
-enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
-
-struct fifo_cfg {
- u8 hw_ep_num;
- enum fifo_style style;
- enum buf_mode mode;
- u16 maxpacket;
-};
-
/*
* tables defining fifo_mode values. define more if you like.
* for host side, make sure both halves of ep1 are set up.
*/
/* mode 0 - fits in 2KB */
-static struct fifo_cfg __initdata mode_0_cfg[] = {
+static struct musb_fifo_cfg mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
@@ -1020,7 +1066,7 @@ static struct fifo_cfg __initdata mode_0_cfg[] = {
};
/* mode 1 - fits in 4KB */
-static struct fifo_cfg __initdata mode_1_cfg[] = {
+static struct musb_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
@@ -1029,7 +1075,7 @@ static struct fifo_cfg __initdata mode_1_cfg[] = {
};
/* mode 2 - fits in 4KB */
-static struct fifo_cfg __initdata mode_2_cfg[] = {
+static struct musb_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1039,7 +1085,7 @@ static struct fifo_cfg __initdata mode_2_cfg[] = {
};
/* mode 3 - fits in 4KB */
-static struct fifo_cfg __initdata mode_3_cfg[] = {
+static struct musb_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1049,7 +1095,7 @@ static struct fifo_cfg __initdata mode_3_cfg[] = {
};
/* mode 4 - fits in 16KB */
-static struct fifo_cfg __initdata mode_4_cfg[] = {
+static struct musb_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1068,18 +1114,47 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
+/* mode 5 - fits in 8KB */
+static struct musb_fifo_cfg mode_5_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
/*
* configure a fifo; for non-shared endpoints, this may be called
@@ -1087,9 +1162,9 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
*
* returns negative errno or offset for next fifo.
*/
-static int __init
+static int
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
- const struct fifo_cfg *cfg, u16 offset)
+ const struct musb_fifo_cfg *cfg, u16 offset)
{
void __iomem *mbase = musb->mregs;
int size = 0;
@@ -1116,14 +1191,12 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
/* configure the FIFO */
musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* EP0 reserved endpoint for control, bidirectional;
- * EP1 reserved for bulk, two unidirection halves.
+ * EP1 reserved for bulk, two unidirectional halves.
*/
if (hw_ep->epnum == 1)
musb->bulk_ep = hw_ep;
/* REVISIT error check: be sure ep0 can both rx and tx ... */
-#endif
switch (cfg->style) {
case FIFO_TX:
musb_write_txfifosz(mbase, c_size);
@@ -1160,17 +1233,23 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
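The return value advances the FIFO RAM allocator by the bytes this endpoint actually consumes; double buffering (MUSB_FIFOSZ_DPB) doubles the footprint. For example:

	/* maxpacket 512, single buffered   -> next offset = offset + 512  */
	/* maxpacket 512, BUF_DOUBLE (DPB)  -> next offset = offset + 1024 */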
-static struct fifo_cfg __initdata ep0_cfg = {
+static struct musb_fifo_cfg ep0_cfg = {
.style = FIFO_RXTX, .maxpacket = 64,
};
-static int __init ep_config_from_table(struct musb *musb)
+static int ep_config_from_table(struct musb *musb)
{
- const struct fifo_cfg *cfg;
+ const struct musb_fifo_cfg *cfg;
unsigned i, n;
int offset;
struct musb_hw_ep *hw_ep = musb->endpoints;
+ if (musb->config->fifo_cfg) {
+ cfg = musb->config->fifo_cfg;
+ n = musb->config->fifo_cfg_size;
+ goto done;
+ }
+
switch (fifo_mode) {
default:
fifo_mode = 0;
@@ -1195,12 +1274,17 @@ static int __init ep_config_from_table(struct musb *musb)
cfg = mode_4_cfg;
n = ARRAY_SIZE(mode_4_cfg);
break;
+ case 5:
+ cfg = mode_5_cfg;
+ n = ARRAY_SIZE(mode_5_cfg);
+ break;
}
printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
musb_driver_name, fifo_mode);
+done:
offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
/* assert(offset > 0) */
@@ -1220,7 +1304,7 @@ static int __init ep_config_from_table(struct musb *musb)
if (offset < 0) {
pr_debug("%s: mem overrun, ep %d\n",
musb_driver_name, epn);
- return -EINVAL;
+ return offset;
}
epn++;
musb->nr_endpoints = max(epn, musb->nr_endpoints);
@@ -1231,12 +1315,10 @@ static int __init ep_config_from_table(struct musb *musb)
n + 1, musb->config->num_eps * 2 - 1,
offset, (1 << (musb->config->ram_bits + 2)));
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
if (!musb->bulk_ep) {
pr_debug("%s: missing bulk\n", musb_driver_name);
return -EINVAL;
}
-#endif
return 0;
}
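ep_config_from_table() now prefers a table supplied through struct musb_hdrc_config (the fifo_cfg/fifo_cfg_size fields checked at the top of the function) over the built-in fifo_mode tables. A hypothetical glue- or board-provided layout might look like this; the names and sizes are illustrative only:

static struct musb_fifo_cfg my_board_fifo_cfg[] = {	/* hypothetical */
	{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 1024, },
};

static struct musb_hdrc_config my_board_config = {	/* hypothetical */
	.fifo_cfg	= my_board_fifo_cfg,
	.fifo_cfg_size	= ARRAY_SIZE(my_board_fifo_cfg),
	.multipoint	= 1,
	.num_eps	= 16,
	.ram_bits	= 12,
};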
@@ -1246,14 +1328,14 @@ static int __init ep_config_from_table(struct musb *musb)
* ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
* @param musb the controller
*/
-static int __init ep_config_from_hw(struct musb *musb)
+static int ep_config_from_hw(struct musb *musb)
{
u8 epnum = 0;
struct musb_hw_ep *hw_ep;
- void *mbase = musb->mregs;
+ void __iomem *mbase = musb->mregs;
int ret = 0;
- DBG(2, "<== static silicon ep config\n");
+ dev_dbg(musb->controller, "<== static silicon ep config\n");
/* FIXME pick up ep0 maxpacket size */
@@ -1267,7 +1349,6 @@ static int __init ep_config_from_hw(struct musb *musb)
/* FIXME set up hw_ep->{rx,tx}_double_buffered */
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* pick an RX/TX endpoint for bulk */
if (hw_ep->max_packet_sz_tx < 512
|| hw_ep->max_packet_sz_rx < 512)
@@ -1279,15 +1360,12 @@ static int __init ep_config_from_hw(struct musb *musb)
if (musb->bulk_ep)
continue;
musb->bulk_ep = hw_ep;
-#endif
}
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
if (!musb->bulk_ep) {
pr_debug("%s: missing bulk\n", musb_driver_name);
return -EINVAL;
}
-#endif
return 0;
}
@@ -1297,49 +1375,38 @@ enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
* configure endpoints, or take their config from silicon
*/
-static int __init musb_core_init(u16 musb_type, struct musb *musb)
+static int musb_core_init(u16 musb_type, struct musb *musb)
{
-#ifdef MUSB_AHB_ID
- u32 data;
-#endif
u8 reg;
char *type;
- u16 hwvers, rev_major, rev_minor;
- char aInfo[78], aRevision[32], aDate[12];
+ char aInfo[90], aRevision[32], aDate[12];
void __iomem *mbase = musb->mregs;
int status = 0;
int i;
/* log core options (read using indexed model) */
- musb_ep_select(mbase, 0);
reg = musb_read_configdata(mbase);
strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
- if (reg & MUSB_CONFIGDATA_DYNFIFO)
+ if (reg & MUSB_CONFIGDATA_DYNFIFO) {
strcat(aInfo, ", dyn FIFOs");
+ musb->dyn_fifo = true;
+ }
if (reg & MUSB_CONFIGDATA_MPRXE) {
strcat(aInfo, ", bulk combine");
-#ifdef C_MP_RX
musb->bulk_combine = true;
-#else
- strcat(aInfo, " (X)"); /* no driver support */
-#endif
}
if (reg & MUSB_CONFIGDATA_MPTXE) {
strcat(aInfo, ", bulk split");
-#ifdef C_MP_TX
musb->bulk_split = true;
-#else
- strcat(aInfo, " (X)"); /* no driver support */
-#endif
}
if (reg & MUSB_CONFIGDATA_HBRXE) {
strcat(aInfo, ", HB-ISO Rx");
- strcat(aInfo, " (X)"); /* no driver support */
+ musb->hb_iso_rx = true;
}
if (reg & MUSB_CONFIGDATA_HBTXE) {
strcat(aInfo, ", HB-ISO Tx");
- strcat(aInfo, " (X)"); /* no driver support */
+ musb->hb_iso_tx = true;
}
if (reg & MUSB_CONFIGDATA_SOFTCONE)
strcat(aInfo, ", SoftConn");
@@ -1347,41 +1414,25 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
musb_driver_name, reg, aInfo);
-#ifdef MUSB_AHB_ID
- data = musb_readl(mbase, 0x404);
- sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
- (data >> 16) & 0xff, (data >> 24) & 0xff);
- /* FIXME ID2 and ID3 are unused */
- data = musb_readl(mbase, 0x408);
- printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
- data = musb_readl(mbase, 0x40c);
- printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
- reg = musb_readb(mbase, 0x400);
- musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
-#else
aDate[0] = 0;
-#endif
if (MUSB_CONTROLLER_MHDRC == musb_type) {
musb->is_multipoint = 1;
type = "M";
} else {
musb->is_multipoint = 0;
type = "";
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
printk(KERN_ERR
"%s: kernel must blacklist external hubs\n",
musb_driver_name);
#endif
-#endif
}
/* log release info */
- hwvers = musb_read_hwvers(mbase);
- rev_major = (hwvers >> 10) & 0x1f;
- rev_minor = hwvers & 0x3ff;
- snprintf(aRevision, 32, "%d.%d%s", rev_major,
- rev_minor, (hwvers & 0x8000) ? "RC" : "");
+ musb->hwvers = musb_read_hwvers(mbase);
+ snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers),
+ MUSB_HWVERS_MINOR(musb->hwvers),
+ (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
musb_driver_name, type, aRevision, aDate);
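The raw HWVERS value is now decoded through the MUSB_HWVERS_* helpers instead of open-coded shifts; the field layout is unchanged, as the removed lines show. A worked example:

	/*
	 * hwvers = 0x0800: major = (0x0800 >> 10) & 0x1f = 2,
	 *                  minor = 0x0800 & 0x3ff = 0,
	 *                  RC flag (bit 15) clear
	 * -> logs "MHDRC RTL version 2.0" ("HDRC" on non-multipoint cores)
	 */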
@@ -1392,21 +1443,10 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
musb->nr_endpoints = 1;
musb->epmask = 1;
- if (reg & MUSB_CONFIGDATA_DYNFIFO) {
- if (musb->config->dyn_fifo)
- status = ep_config_from_table(musb);
- else {
- ERR("reconfigure software for Dynamic FIFOs\n");
- status = -ENODEV;
- }
- } else {
- if (!musb->config->dyn_fifo)
- status = ep_config_from_hw(musb);
- else {
- ERR("reconfigure software for static FIFOs\n");
- return -ENODEV;
- }
- }
+ if (musb->dyn_fifo)
+ status = ep_config_from_table(musb);
+ else
+ status = ep_config_from_hw(musb);
if (status < 0)
return status;
@@ -1416,7 +1456,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
struct musb_hw_ep *hw_ep = musb->endpoints + i;
hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
-#ifdef CONFIG_USB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
hw_ep->fifo_sync_va =
@@ -1429,14 +1469,12 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
#endif
hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
hw_ep->rx_reinit = 1;
hw_ep->tx_reinit = 1;
-#endif
if (hw_ep->max_packet_sz_tx) {
- printk(KERN_DEBUG
+ dev_dbg(musb->controller,
"%s: hw_ep %d%s, %smax %d\n",
musb_driver_name, i,
hw_ep->is_shared_fifo ? "shared" : "tx",
@@ -1445,7 +1483,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
hw_ep->max_packet_sz_tx);
}
if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
- printk(KERN_DEBUG
+ dev_dbg(musb->controller,
"%s: hw_ep %d%s, %smax %d\n",
musb_driver_name, i,
"rx",
@@ -1454,7 +1492,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
hw_ep->max_packet_sz_rx);
}
if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
- DBG(1, "hw_ep %d not configured\n", i);
+ dev_dbg(musb->controller, "hw_ep %d not configured\n", i);
}
return 0;
@@ -1462,38 +1500,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
-
-static irqreturn_t generic_interrupt(int irq, void *__hci)
-{
- unsigned long flags;
- irqreturn_t retval = IRQ_NONE;
- struct musb *musb = __hci;
-
- spin_lock_irqsave(&musb->lock, flags);
-
- musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
- musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
- musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
-
- if (musb->int_usb || musb->int_tx || musb->int_rx)
- retval = musb_interrupt(musb);
-
- spin_unlock_irqrestore(&musb->lock, flags);
-
- /* REVISIT we sometimes get spurious IRQs on g_ep0
- * not clear why...
- */
- if (retval != IRQ_HANDLED)
- DBG(5, "spurious?\n");
-
- return IRQ_HANDLED;
-}
-
-#else
-#define generic_interrupt NULL
-#endif
-
/*
* handle all the irqs defined by the HDRC core. for now we expect: other
* irq sources (phy, dma, etc) will be handled first, musb->int_* values
@@ -1504,23 +1510,22 @@ static irqreturn_t generic_interrupt(int irq, void *__hci)
irqreturn_t musb_interrupt(struct musb *musb)
{
irqreturn_t retval = IRQ_NONE;
- u8 devctl, power;
+ u8 devctl;
int ep_num;
u32 reg;
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- power = musb_readb(musb->mregs, MUSB_POWER);
- DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n",
+ dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n",
(devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
musb->int_usb, musb->int_tx, musb->int_rx);
/* the core can interrupt us for multiple reasons; docs have
* a generic interrupt flowchart to follow
*/
- if (musb->int_usb & STAGE0_MASK)
+ if (musb->int_usb)
retval |= musb_stage0_irq(musb, musb->int_usb,
- devctl, power);
+ devctl);
/* "stage 1" is handling endpoint irqs */
@@ -1540,13 +1545,10 @@ irqreturn_t musb_interrupt(struct musb *musb)
/* musb_ep_select(musb->mregs, ep_num); */
/* REVISIT just retval = ep->rx_irq(...) */
retval = IRQ_HANDLED;
- if (devctl & MUSB_DEVCTL_HM) {
- if (is_host_capable())
- musb_host_rx(musb, ep_num);
- } else {
- if (is_peripheral_capable())
- musb_g_rx(musb, ep_num);
- }
+ if (devctl & MUSB_DEVCTL_HM)
+ musb_host_rx(musb, ep_num);
+ else
+ musb_g_rx(musb, ep_num);
}
reg >>= 1;
@@ -1561,29 +1563,21 @@ irqreturn_t musb_interrupt(struct musb *musb)
/* musb_ep_select(musb->mregs, ep_num); */
/* REVISIT just retval |= ep->tx_irq(...) */
retval = IRQ_HANDLED;
- if (devctl & MUSB_DEVCTL_HM) {
- if (is_host_capable())
- musb_host_tx(musb, ep_num);
- } else {
- if (is_peripheral_capable())
- musb_g_tx(musb, ep_num);
- }
+ if (devctl & MUSB_DEVCTL_HM)
+ musb_host_tx(musb, ep_num);
+ else
+ musb_g_tx(musb, ep_num);
}
reg >>= 1;
ep_num++;
}
- /* finish handling "global" interrupts after handling fifos */
- if (musb->int_usb)
- retval |= musb_stage2_irq(musb,
- musb->int_usb, devctl, power);
-
return retval;
}
-
+EXPORT_SYMBOL_GPL(musb_interrupt);
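Exporting musb_interrupt() lets each glue layer keep its own top-level IRQ handler; the shape is the same as the generic_interrupt() handler removed above. A minimal sketch (the function name is hypothetical):

static irqreturn_t my_glue_interrupt(int irq, void *__hci)
{
	struct musb *musb = __hci;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&musb->lock, flags);

	/* latch the three status registers, then let the core dispatch */
	musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
	musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
	musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);

	if (musb->int_usb || musb->int_tx || musb->int_rx)
		ret = musb_interrupt(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

	return ret;
}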
#ifndef CONFIG_MUSB_PIO_ONLY
-static int __initdata use_dma = 1;
+static bool use_dma = 1;
/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
@@ -1608,25 +1602,20 @@ void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
} else {
/* endpoints 1..15 */
if (transmit) {
- if (devctl & MUSB_DEVCTL_HM) {
- if (is_host_capable())
- musb_host_tx(musb, epnum);
- } else {
- if (is_peripheral_capable())
- musb_g_tx(musb, epnum);
- }
+ if (devctl & MUSB_DEVCTL_HM)
+ musb_host_tx(musb, epnum);
+ else
+ musb_g_tx(musb, epnum);
} else {
/* receive */
- if (devctl & MUSB_DEVCTL_HM) {
- if (is_host_capable())
- musb_host_rx(musb, epnum);
- } else {
- if (is_peripheral_capable())
- musb_g_rx(musb, epnum);
- }
+ if (devctl & MUSB_DEVCTL_HM)
+ musb_host_rx(musb, epnum);
+ else
+ musb_g_rx(musb, epnum);
}
}
}
+EXPORT_SYMBOL_GPL(musb_dma_completion);
#else
#define use_dma 0
@@ -1634,8 +1623,6 @@ void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_SYSFS
-
static ssize_t
musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -1644,7 +1631,7 @@ musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
int ret = -EINVAL;
spin_lock_irqsave(&musb->lock, flags);
- ret = sprintf(buf, "%s\n", otg_state_string(musb));
+ ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->state));
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
@@ -1682,13 +1669,14 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
unsigned long val;
if (sscanf(buf, "%lu", &val) < 1) {
- printk(KERN_ERR "Invalid VBUS timeout ms value\n");
+ dev_err(dev, "Invalid VBUS timeout ms value\n");
return -EINVAL;
}
spin_lock_irqsave(&musb->lock, flags);
- musb->a_wait_bcon = val;
- if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
+ /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
+ musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
+ if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON)
musb->is_active = 0;
musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
spin_unlock_irqrestore(&musb->lock, flags);
@@ -1706,16 +1694,17 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
spin_lock_irqsave(&musb->lock, flags);
val = musb->a_wait_bcon;
+ /* FIXME get_vbus_status() is normally #defined as false...
+ * and is effectively TUSB-specific.
+ */
vbus = musb_platform_get_vbus_status(musb);
spin_unlock_irqrestore(&musb->lock, flags);
- return sprintf(buf, "Vbus %s, timeout %lu\n",
+ return sprintf(buf, "Vbus %s, timeout %lu msec\n",
vbus ? "on" : "off", val);
}
static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-
/* Gadget drivers can't know that a host is connected so they might want
* to start SRP, but users can. This allows userspace to trigger SRP.
*/
@@ -1728,7 +1717,7 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
if (sscanf(buf, "%hu", &srp) != 1
|| (srp != 1)) {
- printk(KERN_ERR "SRP: Value must be 1\n");
+ dev_err(dev, "SRP: Value must be 1\n");
return -EINVAL;
}
@@ -1739,57 +1728,78 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
-#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+static struct attribute *musb_attributes[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_vbus.attr,
+ &dev_attr_srp.attr,
+ NULL
+};
-#endif /* sysfs */
+static const struct attribute_group musb_attr_group = {
+ .attrs = musb_attributes,
+};
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
struct musb *musb = container_of(data, struct musb, irq_work);
- static int old_state;
- if (musb->xceiv.state != old_state) {
- old_state = musb->xceiv.state;
+ if (musb->xceiv->state != musb->xceiv_old_state) {
+ musb->xceiv_old_state = musb->xceiv->state;
sysfs_notify(&musb->controller->kobj, NULL, "mode");
}
}
+/* Recover from babble interrupt conditions */
+static void musb_recover_work(struct work_struct *data)
+{
+ struct musb *musb = container_of(data, struct musb, recover_work);
+ int status;
+
+ musb_platform_reset(musb);
+
+ usb_phy_vbus_off(musb->xceiv);
+ udelay(100);
+
+ usb_phy_vbus_on(musb->xceiv);
+ udelay(100);
+
+ /*
+ * When a babble condition occurs, the musb controller removes the
+ * session bit and the endpoint config is lost.
+ */
+ if (musb->dyn_fifo)
+ status = ep_config_from_table(musb);
+ else
+ status = ep_config_from_hw(musb);
+
+ /* start the session again */
+ if (status == 0)
+ musb_start(musb);
+}
+
/* --------------------------------------------------------------------------
* Init support
*/
-static struct musb *__init
-allocate_instance(struct device *dev,
+static struct musb *allocate_instance(struct device *dev,
struct musb_hdrc_config *config, void __iomem *mbase)
{
struct musb *musb;
struct musb_hw_ep *ep;
int epnum;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- struct usb_hcd *hcd;
+ int ret;
- hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
- if (!hcd)
+ musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
+ if (!musb)
return NULL;
- /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
- musb = hcd_to_musb(hcd);
INIT_LIST_HEAD(&musb->control);
INIT_LIST_HEAD(&musb->in_bulk);
INIT_LIST_HEAD(&musb->out_bulk);
- hcd->uses_new_polling = 1;
-
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
-#else
- musb = kzalloc(sizeof *musb, GFP_KERNEL);
- if (!musb)
- return NULL;
- dev_set_drvdata(dev, musb);
-
-#endif
-
+ musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
musb->mregs = mbase;
musb->ctrl_base = mbase;
musb->nIrq = -ENODEV;
@@ -1803,7 +1813,17 @@ allocate_instance(struct device *dev,
}
musb->controller = dev;
+
+ ret = musb_host_alloc(musb);
+ if (ret < 0)
+ goto err_free;
+
+ dev_set_drvdata(dev, musb);
+
return musb;
+
+err_free:
+ return NULL;
}
static void musb_free(struct musb *musb)
@@ -1814,15 +1834,7 @@ static void musb_free(struct musb *musb)
*/
#ifdef CONFIG_SYSFS
- device_remove_file(musb->controller, &dev_attr_mode);
- device_remove_file(musb->controller, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- device_remove_file(musb->controller, &dev_attr_srp);
-#endif
-#endif
-
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- musb_gadget_cleanup(musb);
+ sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif
if (musb->nIrq >= 0) {
@@ -1830,155 +1842,130 @@ static void musb_free(struct musb *musb)
disable_irq_wake(musb->nIrq);
free_irq(musb->nIrq, musb);
}
- if (is_dma_capable() && musb->dma_controller) {
- struct dma_controller *c = musb->dma_controller;
- (void) c->stop(c);
- dma_controller_destroy(c);
- }
+ musb_host_free(musb);
+}
- musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
- musb_platform_exit(musb);
- musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+static void musb_deassert_reset(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
- if (musb->clock) {
- clk_disable(musb->clock);
- clk_put(musb->clock);
- }
+ musb = container_of(work, struct musb, deassert_reset_work.work);
-#ifdef CONFIG_USB_MUSB_OTG
- put_device(musb->xceiv.dev);
-#endif
+ spin_lock_irqsave(&musb->lock, flags);
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- usb_put_hcd(musb_to_hcd(musb));
-#else
- kfree(musb);
-#endif
+ if (musb->port1_status & USB_PORT_STAT_RESET)
+ musb_port_reset(musb, false);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
}
/*
* Perform generic per-controller initialization.
*
- * @pDevice: the controller (already clocked, etc)
- * @nIrq: irq
- * @mregs: virtual address of controller registers,
+ * @dev: the controller (already clocked, etc)
+ * @nIrq: IRQ number
+ * @ctrl: virtual address of controller registers,
* not yet corrected for platform-specific offsets
*/
-static int __init
+static int
musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
{
int status;
struct musb *musb;
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
/* The driver might handle more features than the board; OK.
* Fail when the board needs a feature that's not enabled.
*/
if (!plat) {
dev_dbg(dev, "no platform_data?\n");
- return -ENODEV;
- }
- switch (plat->mode) {
- case MUSB_HOST:
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- break;
-#else
- goto bad_config;
-#endif
- case MUSB_PERIPHERAL:
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- break;
-#else
- goto bad_config;
-#endif
- case MUSB_OTG:
-#ifdef CONFIG_USB_MUSB_OTG
- break;
-#else
-bad_config:
-#endif
- default:
- dev_err(dev, "incompatible Kconfig role setting\n");
- return -EINVAL;
+ status = -ENODEV;
+ goto fail0;
}
/* allocate */
musb = allocate_instance(dev, plat->config, ctrl);
- if (!musb)
- return -ENOMEM;
+ if (!musb) {
+ status = -ENOMEM;
+ goto fail0;
+ }
+
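+	/* let runtime PM autosuspend the controller after 200 ms of inactivity */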
+ pm_runtime_use_autosuspend(musb->controller);
+ pm_runtime_set_autosuspend_delay(musb->controller, 200);
+ pm_runtime_enable(musb->controller);
spin_lock_init(&musb->lock);
- musb->board_mode = plat->mode;
musb->board_set_power = plat->set_power;
- musb->set_clock = plat->set_clock;
musb->min_power = plat->min_power;
-
- /* Clock usage is chip-specific ... functional clock (DaVinci,
- * OMAP2430), or PHY ref (some TUSB6010 boards). All this core
- * code does is make sure a clock handle is available; platform
- * code manages it during start/stop and suspend/resume.
- */
- if (plat->clock) {
- musb->clock = clk_get(dev, plat->clock);
- if (IS_ERR(musb->clock)) {
- status = PTR_ERR(musb->clock);
- musb->clock = NULL;
- goto fail;
- }
- }
-
- /* assume vbus is off */
-
- /* platform adjusts musb->mregs and musb->isr if needed,
- * and activates clocks
+ musb->ops = plat->platform_ops;
+ musb->port_mode = plat->mode;
+
+ /* The musb_platform_init() call:
+ * - adjusts musb->mregs
+ * - sets the musb->isr
+ * - may initialize an integrated transceiver
+	 *   - initializes musb->xceiv, usually by usb_get_phy()
+ * - stops powering VBUS
+ *
+ * There are various transceiver configurations. Blackfin,
+ * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
+ * external/discrete ones in various flavors (twl4030 family,
+ * isp1504, non-OTG, etc) mostly hooking up through ULPI.
*/
- musb->isr = generic_interrupt;
status = musb_platform_init(musb);
-
if (status < 0)
- goto fail;
+ goto fail1;
+
if (!musb->isr) {
status = -ENODEV;
goto fail2;
}
-#ifndef CONFIG_MUSB_PIO_ONLY
- if (use_dma && dev->dma_mask) {
- struct dma_controller *c;
+ if (!musb->xceiv->io_ops) {
+ musb->xceiv->io_dev = musb->controller;
+ musb->xceiv->io_priv = musb->mregs;
+ musb->xceiv->io_ops = &musb_ulpi_access;
+ }
- c = dma_controller_create(musb, musb->mregs);
- musb->dma_controller = c;
- if (c)
- (void) c->start(c);
+ pm_runtime_get_sync(musb->controller);
+
+ if (use_dma && dev->dma_mask) {
+ musb->dma_controller = dma_controller_create(musb, musb->mregs);
+ if (IS_ERR(musb->dma_controller)) {
+ status = PTR_ERR(musb->dma_controller);
+ goto fail2_5;
+ }
}
-#endif
- /* ideally this would be abstracted in platform setup */
- if (!is_dma_capable() || !musb->dma_controller)
- dev->dma_mask = NULL;
/* be sure interrupts are disabled before connecting ISR */
musb_platform_disable(musb);
musb_generic_disable(musb);
+ /* Init IRQ workqueue before request_irq */
+ INIT_WORK(&musb->irq_work, musb_irq_work);
+ INIT_WORK(&musb->recover_work, musb_recover_work);
+ INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
+ INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
+
/* setup musb parts of the core (especially endpoints) */
status = musb_core_init(plat->config->multipoint
? MUSB_CONTROLLER_MHDRC
: MUSB_CONTROLLER_HDRC, musb);
if (status < 0)
- goto fail2;
+ goto fail3;
- /* Init IRQ workqueue before request_irq */
- INIT_WORK(&musb->irq_work, musb_irq_work);
+ setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
/* attach to the IRQ */
if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
dev_err(dev, "request_irq %d failed!\n", nIrq);
status = -ENODEV;
- goto fail2;
+ goto fail3;
}
musb->nIrq = nIrq;
-/* FIXME this handles wakeup irqs wrong */
+ /* FIXME this handles wakeup irqs wrong */
if (enable_irq_wake(nIrq) == 0) {
musb->irq_wake = 1;
device_init_wakeup(dev, 1);
@@ -1986,98 +1973,96 @@ bad_config:
musb->irq_wake = 0;
}
- pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
- musb_driver_name,
- ({char *s;
- switch (musb->board_mode) {
- case MUSB_HOST: s = "Host"; break;
- case MUSB_PERIPHERAL: s = "Peripheral"; break;
- default: s = "OTG"; break;
- }; s; }),
- ctrl,
- (is_dma_capable() && musb->dma_controller)
- ? "DMA" : "PIO",
- musb->nIrq);
-
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- /* host side needs more setup, except for no-host modes */
- if (musb->board_mode != MUSB_PERIPHERAL) {
- struct usb_hcd *hcd = musb_to_hcd(musb);
-
- if (musb->board_mode == MUSB_OTG)
- hcd->self.otg_port = 1;
- musb->xceiv.host = &hcd->self;
- hcd->power_budget = 2 * (plat->power ? : 250);
+ /* program PHY to use external vBus if required */
+ if (plat->extvbus) {
+ u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
+ busctl |= MUSB_ULPI_USE_EXTVBUS;
+ musb_write_ulpi_buscontrol(musb->mregs, busctl);
}
-#endif /* CONFIG_USB_MUSB_HDRC_HCD */
- /* For the host-only role, we can activate right away.
- * (We expect the ID pin to be forcibly grounded!!)
- * Otherwise, wait till the gadget driver hooks up.
- */
- if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
+ if (musb->xceiv->otg->default_a) {
MUSB_HST_MODE(musb);
- musb->xceiv.default_a = 1;
- musb->xceiv.state = OTG_STATE_A_IDLE;
-
- status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
- if (status)
- goto fail;
-
- DBG(1, "%s mode, status %d, devctl %02x %c\n",
- "HOST", status,
- musb_readb(musb->mregs, MUSB_DEVCTL),
- (musb_readb(musb->mregs, MUSB_DEVCTL)
- & MUSB_DEVCTL_BDEVICE
- ? 'B' : 'A'));
-
- } else /* peripheral is enabled */ {
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ } else {
MUSB_DEV_MODE(musb);
- musb->xceiv.default_a = 0;
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ }
+ switch (musb->port_mode) {
+ case MUSB_PORT_MODE_HOST:
+ status = musb_host_setup(musb, plat->power);
+ if (status < 0)
+ goto fail3;
+ status = musb_platform_set_mode(musb, MUSB_HOST);
+ break;
+ case MUSB_PORT_MODE_GADGET:
+ status = musb_gadget_setup(musb);
+ if (status < 0)
+ goto fail3;
+ status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+ break;
+ case MUSB_PORT_MODE_DUAL_ROLE:
+ status = musb_host_setup(musb, plat->power);
+ if (status < 0)
+ goto fail3;
status = musb_gadget_setup(musb);
- if (status)
- goto fail;
+ if (status) {
+ musb_host_cleanup(musb);
+ goto fail3;
+ }
+ status = musb_platform_set_mode(musb, MUSB_OTG);
+ break;
+ default:
+ dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
+ break;
+ }
- DBG(1, "%s mode, status %d, dev%02x\n",
- is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
- status,
- musb_readb(musb->mregs, MUSB_DEVCTL));
+ if (status < 0)
+ goto fail3;
- }
+ status = musb_init_debugfs(musb);
+ if (status < 0)
+ goto fail4;
-#ifdef CONFIG_SYSFS
- status = device_create_file(dev, &dev_attr_mode);
- status = device_create_file(dev, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- status = device_create_file(dev, &dev_attr_srp);
-#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
- status = 0;
-#endif
+ status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
if (status)
- goto fail2;
+ goto fail5;
+
+ pm_runtime_put(musb->controller);
return 0;
+fail5:
+ musb_exit_debugfs(musb);
+
+fail4:
+ musb_gadget_cleanup(musb);
+ musb_host_cleanup(musb);
+
+fail3:
+ cancel_work_sync(&musb->irq_work);
+ cancel_work_sync(&musb->recover_work);
+ cancel_delayed_work_sync(&musb->finish_resume_work);
+ cancel_delayed_work_sync(&musb->deassert_reset_work);
+ if (musb->dma_controller)
+ dma_controller_destroy(musb->dma_controller);
+fail2_5:
+ pm_runtime_put_sync(musb->controller);
+
fail2:
-#ifdef CONFIG_SYSFS
- device_remove_file(musb->controller, &dev_attr_mode);
- device_remove_file(musb->controller, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- device_remove_file(musb->controller, &dev_attr_srp);
-#endif
-#endif
+ if (musb->irq_wake)
+ device_init_wakeup(dev, 0);
musb_platform_exit(musb);
-fail:
+
+fail1:
+ pm_runtime_disable(musb->controller);
dev_err(musb->controller,
"musb_init_controller failed with status %d\n", status);
- if (musb->clock)
- clk_put(musb->clock);
- device_init_wakeup(dev, 0);
musb_free(musb);
+fail0:
+
return status;
}
@@ -2087,68 +2072,211 @@ fail:
/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
* bridge to a platform device; this driver then suffices.
*/
-
-#ifndef CONFIG_MUSB_PIO_ONLY
-static u64 *orig_dma_mask;
-#endif
-
-static int __init musb_probe(struct platform_device *pdev)
+static int musb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int irq = platform_get_irq(pdev, 0);
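+	/* "mc" is the MUSB core interrupt as named in the glue layer's resources */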
+ int irq = platform_get_irq_byname(pdev, "mc");
struct resource *iomem;
void __iomem *base;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem || irq == 0)
+ if (!iomem || irq <= 0)
return -ENODEV;
- base = ioremap(iomem->start, iomem->end - iomem->start + 1);
- if (!base) {
- dev_err(dev, "ioremap failed\n");
- return -ENOMEM;
- }
+ base = devm_ioremap_resource(dev, iomem);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
-#ifndef CONFIG_MUSB_PIO_ONLY
- /* clobbered by use_dma=n */
- orig_dma_mask = dev->dma_mask;
-#endif
return musb_init_controller(dev, irq, base);
}
-static int __devexit musb_remove(struct platform_device *pdev)
+static int musb_remove(struct platform_device *pdev)
{
- struct musb *musb = dev_to_musb(&pdev->dev);
- void __iomem *ctrl_base = musb->ctrl_base;
+ struct device *dev = &pdev->dev;
+ struct musb *musb = dev_to_musb(dev);
/* this gets called on rmmod.
* - Host mode: host may still be active
* - Peripheral mode: peripheral is deactivated (or never-activated)
* - OTG mode: both roles are deactivated (or never-activated)
*/
+ musb_exit_debugfs(musb);
musb_shutdown(pdev);
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- if (musb->board_mode == MUSB_HOST)
- usb_remove_hcd(musb_to_hcd(musb));
-#endif
+
+ if (musb->dma_controller)
+ dma_controller_destroy(musb->dma_controller);
+
+ cancel_work_sync(&musb->irq_work);
+ cancel_work_sync(&musb->recover_work);
+ cancel_delayed_work_sync(&musb->finish_resume_work);
+ cancel_delayed_work_sync(&musb->deassert_reset_work);
musb_free(musb);
- iounmap(ctrl_base);
- device_init_wakeup(&pdev->dev, 0);
-#ifndef CONFIG_MUSB_PIO_ONLY
- pdev->dev.dma_mask = orig_dma_mask;
-#endif
+ device_init_wakeup(dev, 0);
return 0;
}
#ifdef CONFIG_PM
-static int musb_suspend(struct platform_device *pdev, pm_message_t message)
+static void musb_save_context(struct musb *musb)
{
- unsigned long flags;
- struct musb *musb = dev_to_musb(&pdev->dev);
+ int i;
+ void __iomem *musb_base = musb->mregs;
+ void __iomem *epio;
+
+ musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
+ musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
+ musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
+ musb->context.power = musb_readb(musb_base, MUSB_POWER);
+ musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
+ musb->context.index = musb_readb(musb_base, MUSB_INDEX);
+ musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ struct musb_hw_ep *hw_ep;
+
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb->context.index_regs[i].txmaxp =
+ musb_readw(epio, MUSB_TXMAXP);
+ musb->context.index_regs[i].txcsr =
+ musb_readw(epio, MUSB_TXCSR);
+ musb->context.index_regs[i].rxmaxp =
+ musb_readw(epio, MUSB_RXMAXP);
+ musb->context.index_regs[i].rxcsr =
+ musb_readw(epio, MUSB_RXCSR);
+
+ if (musb->dyn_fifo) {
+ musb->context.index_regs[i].txfifoadd =
+ musb_read_txfifoadd(musb_base);
+ musb->context.index_regs[i].rxfifoadd =
+ musb_read_rxfifoadd(musb_base);
+ musb->context.index_regs[i].txfifosz =
+ musb_read_txfifosz(musb_base);
+ musb->context.index_regs[i].rxfifosz =
+ musb_read_rxfifosz(musb_base);
+ }
- if (!musb->clock)
- return 0;
+ musb->context.index_regs[i].txtype =
+ musb_readb(epio, MUSB_TXTYPE);
+ musb->context.index_regs[i].txinterval =
+ musb_readb(epio, MUSB_TXINTERVAL);
+ musb->context.index_regs[i].rxtype =
+ musb_readb(epio, MUSB_RXTYPE);
+ musb->context.index_regs[i].rxinterval =
+ musb_readb(epio, MUSB_RXINTERVAL);
+
+ musb->context.index_regs[i].txfunaddr =
+ musb_read_txfunaddr(musb_base, i);
+ musb->context.index_regs[i].txhubaddr =
+ musb_read_txhubaddr(musb_base, i);
+ musb->context.index_regs[i].txhubport =
+ musb_read_txhubport(musb_base, i);
+
+ musb->context.index_regs[i].rxfunaddr =
+ musb_read_rxfunaddr(musb_base, i);
+ musb->context.index_regs[i].rxhubaddr =
+ musb_read_rxhubaddr(musb_base, i);
+ musb->context.index_regs[i].rxhubport =
+ musb_read_rxhubport(musb_base, i);
+ }
+}
+
+static void musb_restore_context(struct musb *musb)
+{
+ int i;
+ void __iomem *musb_base = musb->mregs;
+ void __iomem *ep_target_regs;
+ void __iomem *epio;
+ u8 power;
+
+ musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
+ musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
+ musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
+
+ /* Don't affect SUSPENDM/RESUME bits in POWER reg */
+ power = musb_readb(musb_base, MUSB_POWER);
+ power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
+ musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
+ power |= musb->context.power;
+ musb_writeb(musb_base, MUSB_POWER, power);
+
+ musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
+ musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
+ musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
+ musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ struct musb_hw_ep *hw_ep;
+
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb_writew(epio, MUSB_TXMAXP,
+ musb->context.index_regs[i].txmaxp);
+ musb_writew(epio, MUSB_TXCSR,
+ musb->context.index_regs[i].txcsr);
+ musb_writew(epio, MUSB_RXMAXP,
+ musb->context.index_regs[i].rxmaxp);
+ musb_writew(epio, MUSB_RXCSR,
+ musb->context.index_regs[i].rxcsr);
+
+ if (musb->dyn_fifo) {
+ musb_write_txfifosz(musb_base,
+ musb->context.index_regs[i].txfifosz);
+ musb_write_rxfifosz(musb_base,
+ musb->context.index_regs[i].rxfifosz);
+ musb_write_txfifoadd(musb_base,
+ musb->context.index_regs[i].txfifoadd);
+ musb_write_rxfifoadd(musb_base,
+ musb->context.index_regs[i].rxfifoadd);
+ }
+
+ musb_writeb(epio, MUSB_TXTYPE,
+ musb->context.index_regs[i].txtype);
+ musb_writeb(epio, MUSB_TXINTERVAL,
+ musb->context.index_regs[i].txinterval);
+ musb_writeb(epio, MUSB_RXTYPE,
+ musb->context.index_regs[i].rxtype);
+		musb_writeb(epio, MUSB_RXINTERVAL,
+			musb->context.index_regs[i].rxinterval);
+ musb_write_txfunaddr(musb_base, i,
+ musb->context.index_regs[i].txfunaddr);
+ musb_write_txhubaddr(musb_base, i,
+ musb->context.index_regs[i].txhubaddr);
+ musb_write_txhubport(musb_base, i,
+ musb->context.index_regs[i].txhubport);
+
+ ep_target_regs =
+ musb_read_target_reg_base(i, musb_base);
+
+ musb_write_rxfunaddr(ep_target_regs,
+ musb->context.index_regs[i].rxfunaddr);
+ musb_write_rxhubaddr(ep_target_regs,
+ musb->context.index_regs[i].rxhubaddr);
+ musb_write_rxhubport(ep_target_regs,
+ musb->context.index_regs[i].rxhubport);
+ }
+ musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
+}
+
+static int musb_suspend(struct device *dev)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
@@ -2162,40 +2290,71 @@ static int musb_suspend(struct platform_device *pdev, pm_message_t message)
*/
}
- if (musb->set_clock)
- musb->set_clock(musb->clock, 0);
- else
- clk_disable(musb->clock);
+ musb_save_context(musb);
+
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
-static int musb_resume(struct platform_device *pdev)
+static int musb_resume_noirq(struct device *dev)
{
- unsigned long flags;
- struct musb *musb = dev_to_musb(&pdev->dev);
+ struct musb *musb = dev_to_musb(dev);
+
+ /*
+	 * For static CMOS like DaVinci, register values were preserved
+	 * unless for some reason the whole SoC powered down or the USB
+	 * module got reset through the PSC (vs just being disabled).
+ *
+ * For the DSPS glue layer though, a full register restore has to
+ * be done. As it shouldn't harm other platforms, we do it
+ * unconditionally.
+ */
- if (!musb->clock)
- return 0;
+ musb_restore_context(musb);
- spin_lock_irqsave(&musb->lock, flags);
+ return 0;
+}
- if (musb->set_clock)
- musb->set_clock(musb->clock, 1);
- else
- clk_enable(musb->clock);
+static int musb_runtime_suspend(struct device *dev)
+{
+ struct musb *musb = dev_to_musb(dev);
+
+ musb_save_context(musb);
+
+ return 0;
+}
+
+static int musb_runtime_resume(struct device *dev)
+{
+ struct musb *musb = dev_to_musb(dev);
+ static int first = 1;
- /* for static cmos like DaVinci, register values were preserved
- * unless for some reason the whole soc powered down and we're
- * not treating that as a whole-system restart (e.g. swsusp)
+ /*
+	 * When pm_runtime_get_sync() is called for the first time during
+	 * driver init, some of the structures used by the restore function
+	 * are not yet initialized. The clock still needs to be enabled
+	 * before any register access, though, so pm_runtime_get_sync() has
+	 * to be called anyway. Also, a context restore without a prior save
+	 * does not make any sense.
*/
- spin_unlock_irqrestore(&musb->lock, flags);
+ if (!first)
+ musb_restore_context(musb);
+ first = 0;
+
return 0;
}
+static const struct dev_pm_ops musb_dev_pm_ops = {
+ .suspend = musb_suspend,
+ .resume_noirq = musb_resume_noirq,
+ .runtime_suspend = musb_runtime_suspend,
+ .runtime_resume = musb_runtime_resume,
+};
+
+#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
#else
-#define musb_suspend NULL
-#define musb_resume NULL
+#define MUSB_DEV_PM_OPS NULL
#endif
static struct platform_driver musb_driver = {
@@ -2203,54 +2362,11 @@ static struct platform_driver musb_driver = {
.name = (char *)musb_driver_name,
.bus = &platform_bus_type,
.owner = THIS_MODULE,
+ .pm = MUSB_DEV_PM_OPS,
},
- .remove = __devexit_p(musb_remove),
+ .probe = musb_probe,
+ .remove = musb_remove,
.shutdown = musb_shutdown,
- .suspend = musb_suspend,
- .resume = musb_resume,
};
-/*-------------------------------------------------------------------------*/
-
-static int __init musb_init(void)
-{
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- if (usb_disabled())
- return 0;
-#endif
-
- pr_info("%s: version " MUSB_VERSION ", "
-#ifdef CONFIG_MUSB_PIO_ONLY
- "pio"
-#elif defined(CONFIG_USB_TI_CPPI_DMA)
- "cppi-dma"
-#elif defined(CONFIG_USB_INVENTRA_DMA)
- "musb-dma"
-#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
- "tusb-omap-dma"
-#else
- "?dma?"
-#endif
- ", "
-#ifdef CONFIG_USB_MUSB_OTG
- "otg (peripheral+host)"
-#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
- "peripheral"
-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
- "host"
-#endif
- ", debug=%d\n",
- musb_driver_name, musb_debug);
- return platform_driver_probe(&musb_driver, musb_probe);
-}
-
-/* make us init after usbcore and i2c (transceivers, regulators, etc)
- * and before usb gadget and host-side drivers start to register
- */
-fs_initcall(musb_init);
-
-static void __exit musb_cleanup(void)
-{
- platform_driver_unregister(&musb_driver);
-}
-module_exit(musb_cleanup);
+module_platform_driver(musb_driver);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 630946a2d9f..d155a156f24 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -38,20 +38,30 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
-#include <linux/clk.h>
+#include <linux/timer.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/musb.h>
+#include <linux/phy/phy.h>
+#include <linux/workqueue.h>
struct musb;
struct musb_hw_ep;
struct musb_ep;
+/* Helper defines for struct musb->hwvers */
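+/* (bit 15 = release candidate flag, bits 14:10 = major, bits 9:0 = minor) */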
+#define MUSB_HWVERS_MAJOR(x)	(((x) >> 10) & 0x1f)
+#define MUSB_HWVERS_MINOR(x)	((x) & 0x3ff)
+#define MUSB_HWVERS_RC 0x8000
+#define MUSB_HWVERS_1300 0x52C
+#define MUSB_HWVERS_1400 0x590
+#define MUSB_HWVERS_1800 0x720
+#define MUSB_HWVERS_1900 0x784
+#define MUSB_HWVERS_2000 0x800
#include "musb_debug.h"
#include "musb_dma.h"
@@ -60,95 +70,20 @@ struct musb_ep;
#include "musb_regs.h"
#include "musb_gadget.h"
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "musb_host.h"
-
-
-#ifdef CONFIG_USB_MUSB_OTG
-
-#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST)
-#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL)
-#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG)
-
/* NOTE: otg and peripheral-only state machines start at B_IDLE.
* OTG or host-only go to A_IDLE when ID is sensed.
*/
#define is_peripheral_active(m) (!(m)->is_host)
#define is_host_active(m) ((m)->is_host)
-#else
-#define is_peripheral_enabled(musb) is_peripheral_capable()
-#define is_host_enabled(musb) is_host_capable()
-#define is_otg_enabled(musb) 0
-
-#define is_peripheral_active(musb) is_peripheral_capable()
-#define is_host_active(musb) is_host_capable()
-#endif
-
-#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL)
-/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always
- * override that choice selection (often USB_GADGET_DUMMY_HCD).
- */
-#ifndef CONFIG_USB_GADGET_MUSB_HDRC
-#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
-#endif
-#endif /* need MUSB gadget selection */
-
-
-#ifdef CONFIG_PROC_FS
-#include <linux/fs.h>
-#define MUSB_CONFIG_PROC_FS
-#endif
-
-/****************************** PERIPHERAL ROLE *****************************/
-
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-
-#define is_peripheral_capable() (1)
-
-extern irqreturn_t musb_g_ep0_irq(struct musb *);
-extern void musb_g_tx(struct musb *, u8);
-extern void musb_g_rx(struct musb *, u8);
-extern void musb_g_reset(struct musb *);
-extern void musb_g_suspend(struct musb *);
-extern void musb_g_resume(struct musb *);
-extern void musb_g_wakeup(struct musb *);
-extern void musb_g_disconnect(struct musb *);
-
-#else
-
-#define is_peripheral_capable() (0)
-
-static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
-static inline void musb_g_reset(struct musb *m) {}
-static inline void musb_g_suspend(struct musb *m) {}
-static inline void musb_g_resume(struct musb *m) {}
-static inline void musb_g_wakeup(struct musb *m) {}
-static inline void musb_g_disconnect(struct musb *m) {}
-
-#endif
-
-/****************************** HOST ROLE ***********************************/
-
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-
-#define is_host_capable() (1)
-
-extern irqreturn_t musb_h_ep0_irq(struct musb *);
-extern void musb_host_tx(struct musb *, u8);
-extern void musb_host_rx(struct musb *, u8);
-
-#else
-
-#define is_host_capable() (0)
-
-static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
-static inline void musb_host_tx(struct musb *m, u8 e) {}
-static inline void musb_host_rx(struct musb *m, u8 e) {}
-
-#endif
-
+enum {
+ MUSB_PORT_MODE_HOST = 1,
+ MUSB_PORT_MODE_GADGET,
+ MUSB_PORT_MODE_DUAL_ROLE,
+};
/****************************** CONSTANTS ********************************/
@@ -171,7 +106,8 @@ enum musb_h_ep0_state {
/* peripheral side ep0 states */
enum musb_g_ep0_state {
- MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */
+ MUSB_EP0_STAGE_IDLE, /* idle, waiting for SETUP */
+ MUSB_EP0_STAGE_SETUP, /* received SETUP */
MUSB_EP0_STAGE_TX, /* IN data */
MUSB_EP0_STAGE_RX, /* OUT data */
MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
@@ -179,10 +115,15 @@ enum musb_g_ep0_state {
MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
} __attribute__ ((packed));
-/* OTG protocol constants */
+/*
+ * OTG protocol constants. See USB OTG 1.3 spec,
+ * sections 5.5 "Device Timings" and 6.6.5 "Timers".
+ */
#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
-#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */
-#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */
+#define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */
+#define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */
+#define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */
+
/*************************** REGISTER ACCESS ********************************/
@@ -190,8 +131,9 @@ enum musb_g_ep0_state {
* directly with the "flat" model, or after setting up an index register.
*/
-#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
- || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN)
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \
+ || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \
+ || defined(CONFIG_ARCH_OMAP4)
/* REVISIT indexed access seemed to
* misbehave (on DaVinci) for at least peripheral IN ...
*/
@@ -199,7 +141,8 @@ enum musb_g_ep0_state {
#endif
/* TUSB mapping: "flat" plus ep0 special cases */
-#if defined(CONFIG_USB_TUSB6010)
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
#define musb_ep_select(_mbase, _epnum) \
musb_writeb((_mbase), MUSB_INDEX, (_epnum))
#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
@@ -230,6 +173,35 @@ enum musb_g_ep0_state {
/******************************** TYPES *************************************/
+/**
+ * struct musb_platform_ops - Operations passed to musb_core by HW glue layer
+ * @init: turns on clocks, sets up platform-specific registers, etc
+ * @exit: undoes @init
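+ * @enable:	platform-specific enable hook
+ * @disable:	platform-specific disable hook, undoes @enable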
+ * @set_mode: forcefully changes operating mode
+ * @try_idle:	tries to idle the IP
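+ * @reset:	optional hook to reset the controller IP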
+ * @vbus_status: returns vbus status if possible
+ * @set_vbus: forces vbus status
+ * @adjust_channel_params: pre-check hook for the standard dma channel_program func
+ */
+struct musb_platform_ops {
+ int (*init)(struct musb *musb);
+ int (*exit)(struct musb *musb);
+
+ void (*enable)(struct musb *musb);
+ void (*disable)(struct musb *musb);
+
+ int (*set_mode)(struct musb *musb, u8 mode);
+ void (*try_idle)(struct musb *musb, unsigned long timeout);
+ void (*reset)(struct musb *musb);
+
+ int (*vbus_status)(struct musb *musb);
+ void (*set_vbus)(struct musb *musb, int on);
+
+ int (*adjust_channel_params)(struct dma_channel *channel,
+ u16 packet_sz, u8 *mode,
+ dma_addr_t *dma_addr, u32 *len);
+};
+
/*
* struct musb_hw_ep - endpoint hardware (bidirectional)
*
@@ -240,7 +212,8 @@ struct musb_hw_ep {
void __iomem *fifo;
void __iomem *regs;
-#ifdef CONFIG_USB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
void __iomem *conf;
#endif
@@ -257,14 +230,14 @@ struct musb_hw_ep {
struct dma_channel *tx_channel;
struct dma_channel *rx_channel;
-#ifdef CONFIG_USB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
/* TUSB has "asynchronous" and "synchronous" dma modes */
dma_addr_t fifo_async;
dma_addr_t fifo_sync;
void __iomem *fifo_sync_va;
#endif
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
void __iomem *target_regs;
/* currently scheduled peripheral endpoint */
@@ -273,49 +246,69 @@ struct musb_hw_ep {
u8 rx_reinit;
u8 tx_reinit;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* peripheral side */
struct musb_ep ep_in; /* TX */
struct musb_ep ep_out; /* RX */
-#endif
};
-static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
+static inline struct musb_request *next_in_request(struct musb_hw_ep *hw_ep)
{
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
return next_request(&hw_ep->ep_in);
-#else
- return NULL;
-#endif
}
-static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
+static inline struct musb_request *next_out_request(struct musb_hw_ep *hw_ep)
{
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
return next_request(&hw_ep->ep_out);
-#else
- return NULL;
-#endif
}
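+/* per-endpoint register state saved across suspend by musb_save_context() */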
+struct musb_csr_regs {
+ /* FIFO registers */
+ u16 txmaxp, txcsr, rxmaxp, rxcsr;
+ u16 rxfifoadd, txfifoadd;
+ u8 txtype, txinterval, rxtype, rxinterval;
+ u8 rxfifosz, txfifosz;
+ u8 txfunaddr, txhubaddr, txhubport;
+ u8 rxfunaddr, rxhubaddr, rxhubport;
+};
+
+struct musb_context_registers {
+
+ u8 power;
+ u8 intrusbe;
+ u16 frame;
+ u8 index, testmode;
+
+ u8 devctl, busctl, misc;
+ u32 otg_interfsel;
+
+ struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
+};
+
/*
* struct musb - Driver instance data.
*/
struct musb {
/* device lock */
spinlock_t lock;
- struct clk *clock;
+
+ const struct musb_platform_ops *ops;
+ struct musb_context_registers context;
+
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
+ struct work_struct recover_work;
+ struct delayed_work deassert_reset_work;
+ struct delayed_work finish_resume_work;
+ u16 hwvers;
+ u16 intrrxe;
+ u16 intrtxe;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
#define MUSB_PORT_STAT_RESUME (1 << 31)
u32 port1_status;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
unsigned long rh_timer;
enum musb_h_ep0_state ep0_stage;
@@ -331,13 +324,9 @@ struct musb {
struct list_head control; /* of musb_qh */
struct list_head in_bulk; /* of musb_qh */
struct list_head out_bulk; /* of musb_qh */
- struct musb_qh *periodic[32]; /* tree of interrupt+iso */
-#endif
- /* called with IRQs blocked; ON/nonzero implies starting a session,
- * and waiting at least a_wait_vrise_tmout.
- */
- void (*board_set_vbus)(struct musb *, int is_on);
+ struct timer_list otg_timer;
+ struct notifier_block nb;
struct dma_controller *dma_controller;
@@ -345,10 +334,12 @@ struct musb {
void __iomem *ctrl_base;
void __iomem *mregs;
-#ifdef CONFIG_USB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
dma_addr_t async;
dma_addr_t sync;
void __iomem *sync_va;
+ u8 tusb_revision;
#endif
/* passed down from chip/board specific irq handlers */
@@ -356,7 +347,8 @@ struct musb {
u16 int_rx;
u16 int_tx;
- struct otg_transceiver xceiv;
+ struct usb_phy *xceiv;
+ struct phy *phy;
int nIrq;
unsigned irq_wake:1;
@@ -369,13 +361,11 @@ struct musb {
u16 epmask;
u8 nr_endpoints;
- u8 board_mode; /* enum musb_mode */
int (*board_set_power)(int state);
- int (*set_clock)(struct clk *clk, int is_active);
-
u8 min_power; /* vbus for periph, in mA/2 */
+ int port_mode; /* MUSB_PORT_MODE_* */
bool is_host;
int a_wait_bcon; /* VBUS timeout in msecs */
@@ -385,25 +375,19 @@ struct musb {
unsigned is_active:1;
unsigned is_multipoint:1;
- unsigned ignore_disconnect:1; /* during bus resets */
-#ifdef C_MP_TX
- unsigned bulk_split:1;
+ unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
+ unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
+ unsigned dyn_fifo:1; /* dynamic FIFO supported? */
+
+ unsigned bulk_split:1;
#define can_bulk_split(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
-#else
-#define can_bulk_split(musb, type) 0
-#endif
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
-#ifdef C_MP_RX
- unsigned bulk_combine:1;
+ unsigned bulk_combine:1;
#define can_bulk_combine(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
-#else
-#define can_bulk_combine(musb, type) 0
-#endif
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* is_suspended means USB B_PERIPHERAL suspend */
unsigned is_suspended:1;
@@ -427,26 +411,33 @@ struct musb {
enum musb_g_ep0_state ep0_state;
struct usb_gadget g; /* the gadget */
struct usb_gadget_driver *gadget_driver; /* its driver */
-#endif
+ struct usb_hcd *hcd; /* the usb hcd */
+
+ /*
+ * FIXME: Remove this flag.
+ *
+	 * This is only added to allow Blackfin to work
+	 * with the current driver. For some unknown reason
+	 * Blackfin doesn't work with double buffering,
+	 * which is enabled by default.
+ *
+ * We added this flag to forcefully disable double
+ * buffering until we get it working.
+ */
+ unsigned double_buffer_not_ok:1;
struct musb_hdrc_config *config;
-#ifdef MUSB_CONFIG_PROC_FS
- struct proc_dir_entry *proc_entry;
+ int xceiv_old_state;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
#endif
};
-static inline void musb_set_vbus(struct musb *musb, int is_on)
-{
- musb->board_set_vbus(musb, is_on);
-}
-
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
static inline struct musb *gadget_to_musb(struct usb_gadget *g)
{
return container_of(g, struct musb, g);
}
-#endif
#ifdef CONFIG_BLACKFIN
static inline int musb_read_fifosize(struct musb *musb,
@@ -479,10 +470,11 @@ static inline void musb_configure_ep0(struct musb *musb)
static inline int musb_read_fifosize(struct musb *musb,
struct musb_hw_ep *hw_ep, u8 epnum)
{
+ void __iomem *mbase = musb->mregs;
u8 reg = 0;
/* read from core using indexed model */
- reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
+ reg = musb_readb(mbase, MUSB_EP_OFFSET(epnum, MUSB_FIFOSIZE));
/* 0's returned when no more endpoints */
if (!reg)
return -ENODEV;
@@ -509,6 +501,7 @@ static inline void musb_configure_ep0(struct musb *musb)
{
musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].is_shared_fifo = true;
}
#endif /* CONFIG_BLACKFIN */
@@ -517,8 +510,8 @@ static inline void musb_configure_ep0(struct musb *musb)
extern const char musb_driver_name[];
-extern void musb_start(struct musb *musb);
extern void musb_stop(struct musb *musb);
+extern void musb_start(struct musb *musb);
extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
@@ -527,27 +520,69 @@ extern void musb_load_testpacket(struct musb *);
extern irqreturn_t musb_interrupt(struct musb *);
-extern void musb_platform_enable(struct musb *musb);
-extern void musb_platform_disable(struct musb *musb);
-
extern void musb_hnp_stop(struct musb *musb);
-extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode);
+static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
+{
+ if (musb->ops->set_vbus)
+ musb->ops->set_vbus(musb, is_on);
+}
-#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \
- defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
-extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
-#else
-#define musb_platform_try_idle(x, y) do {} while (0)
-#endif
+static inline void musb_platform_enable(struct musb *musb)
+{
+ if (musb->ops->enable)
+ musb->ops->enable(musb);
+}
-#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN)
-extern int musb_platform_get_vbus_status(struct musb *musb);
-#else
-#define musb_platform_get_vbus_status(x) 0
-#endif
+static inline void musb_platform_disable(struct musb *musb)
+{
+ if (musb->ops->disable)
+ musb->ops->disable(musb);
+}
+
+static inline int musb_platform_set_mode(struct musb *musb, u8 mode)
+{
+ if (!musb->ops->set_mode)
+ return 0;
-extern int __init musb_platform_init(struct musb *musb);
-extern int musb_platform_exit(struct musb *musb);
+ return musb->ops->set_mode(musb, mode);
+}
+
+static inline void musb_platform_try_idle(struct musb *musb,
+ unsigned long timeout)
+{
+ if (musb->ops->try_idle)
+ musb->ops->try_idle(musb, timeout);
+}
+
+static inline void musb_platform_reset(struct musb *musb)
+{
+ if (musb->ops->reset)
+ musb->ops->reset(musb);
+}
+
+static inline int musb_platform_get_vbus_status(struct musb *musb)
+{
+ if (!musb->ops->vbus_status)
+ return 0;
+
+ return musb->ops->vbus_status(musb);
+}
+
+static inline int musb_platform_init(struct musb *musb)
+{
+ if (!musb->ops->init)
+ return -EINVAL;
+
+ return musb->ops->init(musb);
+}
+
+static inline int musb_platform_exit(struct musb *musb)
+{
+ if (!musb->ops->exit)
+ return -EINVAL;
+
+ return musb->ops->exit(musb);
+}
#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
new file mode 100644
index 00000000000..5341bb223b7
--- /dev/null
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -0,0 +1,744 @@
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/sizes.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "musb_core.h"
+
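+/* per-endpoint generic RNDIS size registers, EP1 at offset 0x80 */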
+#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
+
+#define EP_MODE_AUTOREG_NONE 0
+#define EP_MODE_AUTOREG_ALL_NEOP 1
+#define EP_MODE_AUTOREG_ALWAYS 3
+
+#define EP_MODE_DMA_TRANSPARENT 0
+#define EP_MODE_DMA_RNDIS 1
+#define EP_MODE_DMA_GEN_RNDIS 3
+
+#define USB_CTRL_TX_MODE 0x70
+#define USB_CTRL_RX_MODE 0x74
+#define USB_CTRL_AUTOREQ 0xd0
+#define USB_TDOWN 0xd8
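+/* USB_CTRL_* and USB_TDOWN live in the glue's control block, accessed via musb->ctrl_base */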
+
+struct cppi41_dma_channel {
+ struct dma_channel channel;
+ struct cppi41_dma_controller *controller;
+ struct musb_hw_ep *hw_ep;
+ struct dma_chan *dc;
+ dma_cookie_t cookie;
+ u8 port_num;
+ u8 is_tx;
+ u8 is_allocated;
+ u8 usb_toggle;
+
+ dma_addr_t buf_addr;
+ u32 total_len;
+ u32 prog_len;
+ u32 transferred;
+ u32 packet_sz;
+ struct list_head tx_check;
+ struct work_struct dma_completion;
+};
+
+#define MUSB_DMA_NUM_CHANNELS 15
+
+struct cppi41_dma_controller {
+ struct dma_controller controller;
+ struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
+ struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
+ struct musb *musb;
+ struct hrtimer early_tx;
+ struct list_head early_tx_list;
+ u32 rx_mode;
+ u32 tx_mode;
+ u32 auto_req;
+};
+
+static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
+{
+ u16 csr;
+ u8 toggle;
+
+ if (cppi41_channel->is_tx)
+ return;
+ if (!is_host_active(cppi41_channel->controller->musb))
+ return;
+
+ csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
+ toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
+
+ cppi41_channel->usb_toggle = toggle;
+}
+
+static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
+{
+ u16 csr;
+ u8 toggle;
+
+ if (cppi41_channel->is_tx)
+ return;
+ if (!is_host_active(cppi41_channel->controller->musb))
+ return;
+
+ csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
+ toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
+
+ /*
+	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
+	 * the data toggle may reset from DATA1 to DATA0 when receiving data
+	 * from more than one endpoint.
+ */
+ if (!toggle && toggle == cppi41_channel->usb_toggle) {
+ csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
+ musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
+ dev_dbg(cppi41_channel->controller->musb->controller,
+ "Restoring DATA1 toggle.\n");
+ }
+
+ cppi41_channel->usb_toggle = toggle;
+}
+
+static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
+{
+ u8 epnum = hw_ep->epnum;
+ struct musb *musb = hw_ep->musb;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ u16 csr;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+ return false;
+ return true;
+}
+
+static bool is_isoc(struct musb_hw_ep *hw_ep, bool in)
+{
+ if (in && hw_ep->in_qh) {
+ if (hw_ep->in_qh->type == USB_ENDPOINT_XFER_ISOC)
+ return true;
+ } else if (hw_ep->out_qh) {
+ if (hw_ep->out_qh->type == USB_ENDPOINT_XFER_ISOC)
+ return true;
+ }
+ return false;
+}
+
+static void cppi41_dma_callback(void *private_data);
+
+static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
+{
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+
+ if (!cppi41_channel->prog_len ||
+ (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
+
+ /* done, complete */
+ cppi41_channel->channel.actual_len =
+ cppi41_channel->transferred;
+ cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
+ musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
+ } else {
+ /* next iteration, reload */
+ struct dma_chan *dc = cppi41_channel->dc;
+ struct dma_async_tx_descriptor *dma_desc;
+ enum dma_transfer_direction direction;
+ u16 csr;
+ u32 remain_bytes;
+ void __iomem *epio = cppi41_channel->hw_ep->regs;
+
+ cppi41_channel->buf_addr += cppi41_channel->packet_sz;
+
+ remain_bytes = cppi41_channel->total_len;
+ remain_bytes -= cppi41_channel->transferred;
+ remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
+ cppi41_channel->prog_len = remain_bytes;
+
+ direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
+ : DMA_DEV_TO_MEM;
+ dma_desc = dmaengine_prep_slave_single(dc,
+ cppi41_channel->buf_addr,
+ remain_bytes,
+ direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (WARN_ON(!dma_desc))
+ return;
+
+ dma_desc->callback = cppi41_dma_callback;
+ dma_desc->callback_param = &cppi41_channel->channel;
+ cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+ dma_async_issue_pending(dc);
+
+ if (!cppi41_channel->is_tx) {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+}
+
+static void cppi_trans_done_work(struct work_struct *work)
+{
+ unsigned long flags;
+ struct cppi41_dma_channel *cppi41_channel =
+ container_of(work, struct cppi41_dma_channel, dma_completion);
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->musb;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ bool empty;
+
+ if (!cppi41_channel->is_tx && is_isoc(hw_ep, 1)) {
+ spin_lock_irqsave(&musb->lock, flags);
+ cppi41_trans_done(cppi41_channel);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ } else {
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ spin_lock_irqsave(&musb->lock, flags);
+ cppi41_trans_done(cppi41_channel);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ } else {
+ schedule_work(&cppi41_channel->dma_completion);
+ }
+ }
+}
+
+static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
+{
+ struct cppi41_dma_controller *controller;
+ struct cppi41_dma_channel *cppi41_channel, *n;
+ struct musb *musb;
+ unsigned long flags;
+ enum hrtimer_restart ret = HRTIMER_NORESTART;
+
+ controller = container_of(timer, struct cppi41_dma_controller,
+ early_tx);
+ musb = controller->musb;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
+ tx_check) {
+ bool empty;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ list_del_init(&cppi41_channel->tx_check);
+ cppi41_trans_done(cppi41_channel);
+ }
+ }
+
+ if (!list_empty(&controller->early_tx_list)) {
+ ret = HRTIMER_RESTART;
+ hrtimer_forward_now(&controller->early_tx,
+ ktime_set(0, 150 * NSEC_PER_USEC));
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return ret;
+}
+
+static void cppi41_dma_callback(void *private_data)
+{
+ struct dma_channel *channel = private_data;
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ unsigned long flags;
+ struct dma_tx_state txstate;
+ u32 transferred;
+ bool empty;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
+ &txstate);
+ transferred = cppi41_channel->prog_len - txstate.residue;
+ cppi41_channel->transferred += transferred;
+
+ dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
+ hw_ep->epnum, cppi41_channel->transferred,
+ cppi41_channel->total_len);
+
+ update_rx_toggle(cppi41_channel);
+
+ if (cppi41_channel->transferred == cppi41_channel->total_len ||
+ transferred < cppi41_channel->packet_sz)
+ cppi41_channel->prog_len = 0;
+
+ if (!cppi41_channel->is_tx) {
+ if (is_isoc(hw_ep, 1))
+ schedule_work(&cppi41_channel->dma_completion);
+ else
+ cppi41_trans_done(cppi41_channel);
+ goto out;
+ }
+
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ cppi41_trans_done(cppi41_channel);
+ } else {
+ struct cppi41_dma_controller *controller;
+ /*
+		 * On AM335x it has been observed that the TX interrupt fires
+		 * too early, i.e. the TX FIFO is not yet empty even though the
+		 * DMA engine says it is done with the transfer. We don't
+		 * receive a FIFO-empty interrupt, so the only thing we can do
+		 * is poll for the bit. On HS it usually takes 2us, on FS
+		 * around 110us - 150us depending on the transfer size.
+		 * We spin on HS (no longer than 25us) and set up a timer on
+		 * FS to check for the bit and complete the transfer.
+ */
+ controller = cppi41_channel->controller;
+
+ if (musb->g.speed == USB_SPEED_HIGH) {
+ unsigned wait = 25;
+
+ do {
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty)
+ break;
+ wait--;
+ if (!wait)
+ break;
+ udelay(1);
+ } while (1);
+
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ if (empty) {
+ cppi41_trans_done(cppi41_channel);
+ goto out;
+ }
+ }
+ if (is_isoc(hw_ep, 0)) {
+ schedule_work(&cppi41_channel->dma_completion);
+ goto out;
+ }
+ list_add_tail(&cppi41_channel->tx_check,
+ &controller->early_tx_list);
+ if (!hrtimer_is_queued(&controller->early_tx)) {
+ hrtimer_start_range_ns(&controller->early_tx,
+ ktime_set(0, 140 * NSEC_PER_USEC),
+ 40 * NSEC_PER_USEC,
+ HRTIMER_MODE_REL);
+ }
+ }
+out:
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
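+/* each endpoint owns a 2-bit field in the mode/autoreq registers; update only that field */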
+static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
+{
+ unsigned shift;
+
+ shift = (ep - 1) * 2;
+ old &= ~(3 << shift);
+ old |= mode << shift;
+ return old;
+}
+
+static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ if (cppi41_channel->is_tx)
+ old_mode = controller->tx_mode;
+ else
+ old_mode = controller->rx_mode;
+ port = cppi41_channel->port_num;
+ new_mode = update_ep_mode(port, mode, old_mode);
+
+ if (new_mode == old_mode)
+ return;
+ if (cppi41_channel->is_tx) {
+ controller->tx_mode = new_mode;
+ musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
+ new_mode);
+ } else {
+ controller->rx_mode = new_mode;
+ musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
+ new_mode);
+ }
+}
+
+static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ old_mode = controller->auto_req;
+ port = cppi41_channel->port_num;
+ new_mode = update_ep_mode(port, mode, old_mode);
+
+ if (new_mode == old_mode)
+ return;
+ controller->auto_req = new_mode;
+ musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
+}
+
+static bool cppi41_configure_channel(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct dma_chan *dc = cppi41_channel->dc;
+ struct dma_async_tx_descriptor *dma_desc;
+ enum dma_transfer_direction direction;
+ struct musb *musb = cppi41_channel->controller->musb;
+ unsigned use_gen_rndis = 0;
+
+ dev_dbg(musb->controller,
+ "configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
+ cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
+ packet_sz, mode, (unsigned long long) dma_addr,
+ len, cppi41_channel->is_tx);
+
+ cppi41_channel->buf_addr = dma_addr;
+ cppi41_channel->total_len = len;
+ cppi41_channel->transferred = 0;
+ cppi41_channel->packet_sz = packet_sz;
+
+ /*
+	 * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
+	 * than the max packet size at a time.
+ */
+ if (cppi41_channel->is_tx)
+ use_gen_rndis = 1;
+
+ if (use_gen_rndis) {
+ /* RNDIS mode */
+ if (len > packet_sz) {
+ musb_writel(musb->ctrl_base,
+ RNDIS_REG(cppi41_channel->port_num), len);
+ /* gen rndis */
+ cppi41_set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_GEN_RNDIS);
+
+ /* auto req */
+ cppi41_set_autoreq_mode(cppi41_channel,
+ EP_MODE_AUTOREG_ALL_NEOP);
+ } else {
+ musb_writel(musb->ctrl_base,
+ RNDIS_REG(cppi41_channel->port_num), 0);
+ cppi41_set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_TRANSPARENT);
+ cppi41_set_autoreq_mode(cppi41_channel,
+ EP_MODE_AUTOREG_NONE);
+ }
+ } else {
+ /* fallback mode */
+ cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
+ cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
+ len = min_t(u32, packet_sz, len);
+ }
+ cppi41_channel->prog_len = len;
+ direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma_desc)
+ return false;
+
+ dma_desc->callback = cppi41_dma_callback;
+ dma_desc->callback_param = channel;
+ cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+
+ save_rx_toggle(cppi41_channel);
+ dma_async_issue_pending(dc);
+ return true;
+}
+
+static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep, u8 is_tx)
+{
+ struct cppi41_dma_controller *controller = container_of(c,
+ struct cppi41_dma_controller, controller);
+ struct cppi41_dma_channel *cppi41_channel = NULL;
+ u8 ch_num = hw_ep->epnum - 1;
+
+ if (ch_num >= MUSB_DMA_NUM_CHANNELS)
+ return NULL;
+
+ if (is_tx)
+ cppi41_channel = &controller->tx_channel[ch_num];
+ else
+ cppi41_channel = &controller->rx_channel[ch_num];
+
+ if (!cppi41_channel->dc)
+ return NULL;
+
+ if (cppi41_channel->is_allocated)
+ return NULL;
+
+ cppi41_channel->hw_ep = hw_ep;
+ cppi41_channel->is_allocated = 1;
+
+ return &cppi41_channel->channel;
+}
+
+static void cppi41_dma_channel_release(struct dma_channel *channel)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+
+ if (cppi41_channel->is_allocated) {
+ cppi41_channel->is_allocated = 0;
+ channel->status = MUSB_DMA_STATUS_FREE;
+ channel->actual_len = 0;
+ }
+}
+
+static int cppi41_dma_channel_program(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ int ret;
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ int hb_mult = 0;
+
+ BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ channel->status == MUSB_DMA_STATUS_BUSY);
+
+ if (is_host_active(cppi41_channel->controller->musb)) {
+ if (cppi41_channel->is_tx)
+ hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
+ else
+ hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
+ }
+
+ channel->status = MUSB_DMA_STATUS_BUSY;
+ channel->actual_len = 0;
+
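+	/* high-bandwidth transfers: scale the base maxpacket (bits 10:0) by the multiplier */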
+ if (hb_mult)
+ packet_sz = hb_mult * (packet_sz & 0x7FF);
+
+ ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
+ if (!ret)
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ return ret;
+}
+
+static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
+ void *buf, u32 length)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->musb;
+
+ if (is_host_active(musb)) {
+ WARN_ON(1);
+ return 1;
+ }
+ if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
+ return 0;
+ if (cppi41_channel->is_tx)
+ return 1;
+ /* AM335x Advisory 1.0.13. No workaround for device RX mode */
+ return 0;
+}
+
+static int cppi41_dma_channel_abort(struct dma_channel *channel)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->musb;
+ void __iomem *epio = cppi41_channel->hw_ep->regs;
+ int tdbit;
+ int ret;
+ unsigned is_tx;
+ u16 csr;
+
+ is_tx = cppi41_channel->is_tx;
+ dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
+ cppi41_channel->port_num, is_tx);
+
+ if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
+ return 0;
+
+ list_del_init(&cppi41_channel->tx_check);
+ if (is_tx) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY) {
+ csr |= MUSB_RXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+
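+	/* teardown bits: RX channels in the lower 16 bits, TX channels in the upper 16 */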
+ tdbit = 1 << cppi41_channel->port_num;
+ if (is_tx)
+ tdbit <<= 16;
+
+ do {
+ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ ret = dmaengine_terminate_all(cppi41_channel->dc);
+ } while (ret == -EAGAIN);
+
+ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+
+ if (is_tx) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY) {
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ }
+
+ cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
+ return 0;
+}
+
+static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
+{
+ struct dma_chan *dc;
+ int i;
+
+ for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
+ dc = ctrl->tx_channel[i].dc;
+ if (dc)
+ dma_release_channel(dc);
+ dc = ctrl->rx_channel[i].dc;
+ if (dc)
+ dma_release_channel(dc);
+ }
+}
+
+static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
+{
+ cppi41_release_all_dma_chans(controller);
+}
+
+static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
+{
+ struct musb *musb = controller->musb;
+ struct device *dev = musb->controller;
+ struct device_node *np = dev->of_node;
+ struct cppi41_dma_channel *cppi41_channel;
+ int count;
+ int i;
+ int ret;
+
+ count = of_property_count_strings(np, "dma-names");
+ if (count < 0)
+ return count;
+
+ for (i = 0; i < count; i++) {
+ struct dma_chan *dc;
+ struct dma_channel *musb_dma;
+ const char *str;
+ unsigned is_tx;
+ unsigned int port;
+
+ ret = of_property_read_string_index(np, "dma-names", i, &str);
+ if (ret)
+ goto err;
+ if (!strncmp(str, "tx", 2))
+ is_tx = 1;
+ else if (!strncmp(str, "rx", 2))
+ is_tx = 0;
+		else {
+			dev_err(dev, "Wrong dmatype %s\n", str);
+			ret = -EINVAL;
+			goto err;
+		}
+ ret = kstrtouint(str + 2, 0, &port);
+ if (ret)
+ goto err;
+
+ ret = -EINVAL;
+ if (port > MUSB_DMA_NUM_CHANNELS || !port)
+ goto err;
+ if (is_tx)
+ cppi41_channel = &controller->tx_channel[port - 1];
+ else
+ cppi41_channel = &controller->rx_channel[port - 1];
+
+ cppi41_channel->controller = controller;
+ cppi41_channel->port_num = port;
+ cppi41_channel->is_tx = is_tx;
+ INIT_LIST_HEAD(&cppi41_channel->tx_check);
+ INIT_WORK(&cppi41_channel->dma_completion,
+ cppi_trans_done_work);
+
+ musb_dma = &cppi41_channel->channel;
+ musb_dma->private_data = cppi41_channel;
+ musb_dma->status = MUSB_DMA_STATUS_FREE;
+ musb_dma->max_len = SZ_4M;
+
+ dc = dma_request_slave_channel(dev, str);
+ if (!dc) {
+ dev_err(dev, "Failed to request %s.\n", str);
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
+ cppi41_channel->dc = dc;
+ }
+ return 0;
+err:
+ cppi41_release_all_dma_chans(controller);
+ return ret;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct cppi41_dma_controller *controller = container_of(c,
+ struct cppi41_dma_controller, controller);
+
+ hrtimer_cancel(&controller->early_tx);
+ cppi41_dma_controller_stop(controller);
+ kfree(controller);
+}
+
+struct dma_controller *dma_controller_create(struct musb *musb,
+ void __iomem *base)
+{
+ struct cppi41_dma_controller *controller;
+ int ret = 0;
+
+ if (!musb->controller->of_node) {
+ dev_err(musb->controller, "Need DT for the DMA engine.\n");
+ return NULL;
+ }
+
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ goto kzalloc_fail;
+
+ hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ controller->early_tx.function = cppi41_recheck_tx_req;
+ INIT_LIST_HEAD(&controller->early_tx_list);
+ controller->musb = musb;
+
+ controller->controller.channel_alloc = cppi41_dma_channel_allocate;
+ controller->controller.channel_release = cppi41_dma_channel_release;
+ controller->controller.channel_program = cppi41_dma_channel_program;
+ controller->controller.channel_abort = cppi41_dma_channel_abort;
+ controller->controller.is_compatible = cppi41_is_compatible;
+
+ ret = cppi41_dma_controller_start(controller);
+ if (ret)
+ goto plat_get_fail;
+ return &controller->controller;
+
+plat_get_fail:
+ kfree(controller);
+kzalloc_fail:
+ if (ret == -EPROBE_DEFER)
+ return ERR_PTR(ret);
+ return NULL;
+}
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 9fc1db44c72..27ba8f79946 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -42,21 +42,17 @@
#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
-#define xprintk(level, facility, format, args...) do { \
- if (_dbg_level(level)) { \
- printk(facility "%s %d: " format , \
- __func__, __LINE__ , ## args); \
- } } while (0)
-
-extern unsigned musb_debug;
-
-static inline int _dbg_level(unsigned l)
+#ifdef CONFIG_DEBUG_FS
+int musb_init_debugfs(struct musb *musb);
+void musb_exit_debugfs(struct musb *musb);
+#else
+static inline int musb_init_debugfs(struct musb *musb)
{
- return musb_debug >= l;
+ return 0;
}
-
-#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
-
-extern const char *otg_state_string(struct musb *);
+static inline void musb_exit_debugfs(struct musb *musb)
+{
+}
+#endif
#endif /* __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
new file mode 100644
index 00000000000..4c216790e86
--- /dev/null
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -0,0 +1,276 @@
+/*
+ * MUSB OTG driver debugfs support
+ *
+ * Copyright 2010 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/uaccess.h>
+
+#include "musb_core.h"
+#include "musb_debug.h"
+
+struct musb_register_map {
+ char *name;
+ unsigned offset;
+ unsigned size;
+};
+
+static const struct musb_register_map musb_regmap[] = {
+ { "FAddr", 0x00, 8 },
+ { "Power", 0x01, 8 },
+ { "Frame", 0x0c, 16 },
+ { "Index", 0x0e, 8 },
+ { "Testmode", 0x0f, 8 },
+ { "TxMaxPp", 0x10, 16 },
+ { "TxCSRp", 0x12, 16 },
+ { "RxMaxPp", 0x14, 16 },
+ { "RxCSR", 0x16, 16 },
+ { "RxCount", 0x18, 16 },
+ { "ConfigData", 0x1f, 8 },
+ { "DevCtl", 0x60, 8 },
+ { "MISC", 0x61, 8 },
+ { "TxFIFOsz", 0x62, 8 },
+ { "RxFIFOsz", 0x63, 8 },
+ { "TxFIFOadd", 0x64, 16 },
+ { "RxFIFOadd", 0x66, 16 },
+ { "VControl", 0x68, 32 },
+ { "HWVers", 0x6C, 16 },
+ { "EPInfo", 0x78, 8 },
+ { "RAMInfo", 0x79, 8 },
+ { "LinkInfo", 0x7A, 8 },
+ { "VPLen", 0x7B, 8 },
+ { "HS_EOF1", 0x7C, 8 },
+ { "FS_EOF1", 0x7D, 8 },
+ { "LS_EOF1", 0x7E, 8 },
+ { "SOFT_RST", 0x7F, 8 },
+ { "DMA_CNTLch0", 0x204, 16 },
+ { "DMA_ADDRch0", 0x208, 32 },
+ { "DMA_COUNTch0", 0x20C, 32 },
+ { "DMA_CNTLch1", 0x214, 16 },
+ { "DMA_ADDRch1", 0x218, 32 },
+ { "DMA_COUNTch1", 0x21C, 32 },
+ { "DMA_CNTLch2", 0x224, 16 },
+ { "DMA_ADDRch2", 0x228, 32 },
+ { "DMA_COUNTch2", 0x22C, 32 },
+ { "DMA_CNTLch3", 0x234, 16 },
+ { "DMA_ADDRch3", 0x238, 32 },
+ { "DMA_COUNTch3", 0x23C, 32 },
+ { "DMA_CNTLch4", 0x244, 16 },
+ { "DMA_ADDRch4", 0x248, 32 },
+ { "DMA_COUNTch4", 0x24C, 32 },
+ { "DMA_CNTLch5", 0x254, 16 },
+ { "DMA_ADDRch5", 0x258, 32 },
+ { "DMA_COUNTch5", 0x25C, 32 },
+ { "DMA_CNTLch6", 0x264, 16 },
+ { "DMA_ADDRch6", 0x268, 32 },
+ { "DMA_COUNTch6", 0x26C, 32 },
+ { "DMA_CNTLch7", 0x274, 16 },
+ { "DMA_ADDRch7", 0x278, 32 },
+ { "DMA_COUNTch7", 0x27C, 32 },
+ { } /* Terminating Entry */
+};
+
+static int musb_regdump_show(struct seq_file *s, void *unused)
+{
+ struct musb *musb = s->private;
+ unsigned i;
+
+ seq_printf(s, "MUSB (M)HDRC Register Dump\n");
+
+ for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
+ switch (musb_regmap[i].size) {
+ case 8:
+ seq_printf(s, "%-12s: %02x\n", musb_regmap[i].name,
+ musb_readb(musb->mregs, musb_regmap[i].offset));
+ break;
+ case 16:
+ seq_printf(s, "%-12s: %04x\n", musb_regmap[i].name,
+ musb_readw(musb->mregs, musb_regmap[i].offset));
+ break;
+ case 32:
+ seq_printf(s, "%-12s: %08x\n", musb_regmap[i].name,
+ musb_readl(musb->mregs, musb_regmap[i].offset));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int musb_regdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, musb_regdump_show, inode->i_private);
+}
+
+static int musb_test_mode_show(struct seq_file *s, void *unused)
+{
+ struct musb *musb = s->private;
+ unsigned test;
+
+ test = musb_readb(musb->mregs, MUSB_TESTMODE);
+
+ if (test & MUSB_TEST_FORCE_HOST)
+ seq_printf(s, "force host\n");
+
+ if (test & MUSB_TEST_FIFO_ACCESS)
+ seq_printf(s, "fifo access\n");
+
+ if (test & MUSB_TEST_FORCE_FS)
+ seq_printf(s, "force full-speed\n");
+
+ if (test & MUSB_TEST_FORCE_HS)
+ seq_printf(s, "force high-speed\n");
+
+ if (test & MUSB_TEST_PACKET)
+ seq_printf(s, "test packet\n");
+
+ if (test & MUSB_TEST_K)
+ seq_printf(s, "test K\n");
+
+ if (test & MUSB_TEST_J)
+ seq_printf(s, "test J\n");
+
+ if (test & MUSB_TEST_SE0_NAK)
+ seq_printf(s, "test SE0 NAK\n");
+
+ return 0;
+}
+
+static const struct file_operations musb_regdump_fops = {
+ .open = musb_regdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int musb_test_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, musb_test_mode_show, inode->i_private);
+}
+
+static ssize_t musb_test_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct musb *musb = s->private;
+ u8 test = 0;
+ char buf[18];
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "force host", 9))
+ test = MUSB_TEST_FORCE_HOST;
+
+ if (!strncmp(buf, "fifo access", 11))
+ test = MUSB_TEST_FIFO_ACCESS;
+
+ if (!strncmp(buf, "force full-speed", 15))
+ test = MUSB_TEST_FORCE_FS;
+
+ if (!strncmp(buf, "force high-speed", 15))
+ test = MUSB_TEST_FORCE_HS;
+
+ if (!strncmp(buf, "test packet", 10)) {
+ test = MUSB_TEST_PACKET;
+ musb_load_testpacket(musb);
+ }
+
+ if (!strncmp(buf, "test K", 6))
+ test = MUSB_TEST_K;
+
+ if (!strncmp(buf, "test J", 6))
+ test = MUSB_TEST_J;
+
+ if (!strncmp(buf, "test SE0 NAK", 12))
+ test = MUSB_TEST_SE0_NAK;
+
+ musb_writeb(musb->mregs, MUSB_TESTMODE, test);
+
+ return count;
+}
+
+static const struct file_operations musb_test_mode_fops = {
+ .open = musb_test_mode_open,
+ .write = musb_test_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int musb_init_debugfs(struct musb *musb)
+{
+ struct dentry *root;
+ struct dentry *file;
+ int ret;
+
+ root = debugfs_create_dir(dev_name(musb->controller), NULL);
+ if (!root) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ file = debugfs_create_file("regdump", S_IRUGO, root, musb,
+ &musb_regdump_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("testmode", S_IRUGO | S_IWUSR,
+ root, musb, &musb_test_mode_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ musb->debugfs_root = root;
+
+ return 0;
+
+err1:
+ debugfs_remove_recursive(root);
+
+err0:
+ return ret;
+}
+
+void /* __init_or_exit */ musb_exit_debugfs(struct musb *musb)
+{
+ debugfs_remove_recursive(musb->debugfs_root);
+}
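
As a usage note (not part of the patch), the two files created above can be exercised from user space once debugfs is mounted. The directory name comes from dev_name(musb->controller), so the exact path used below is an assumption; adjust it to match the device on your system.

/* Hedged user-space example; the debugfs path is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/musb-hdrc.0/regdump", O_RDONLY);

	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);	/* MUSB register dump */
		}
		close(fd);
	}

	/* select "test packet" mode through the writable testmode file */
	fd = open("/sys/kernel/debug/musb-hdrc.0/testmode", O_WRONLY);
	if (fd >= 0) {
		(void)write(fd, "test packet", 11);
		close(fd);
	}
	return 0;
}
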
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 0a2c4e3602c..1345a4ff041 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -62,13 +62,13 @@ struct musb_hw_ep;
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
-#ifndef CONFIG_MUSB_PIO_ONLY
-#define is_dma_capable() (1)
-#else
+#ifdef CONFIG_MUSB_PIO_ONLY
#define is_dma_capable() (0)
+#else
+#define is_dma_capable() (1)
#endif
-#ifdef CONFIG_USB_TI_CPPI_DMA
+#if defined(CONFIG_USB_TI_CPPI_DMA) || defined(CONFIG_USB_TI_CPPI41_DMA)
#define is_cppi_enabled() 1
#else
#define is_cppi_enabled() 0
@@ -80,6 +80,17 @@ struct musb_hw_ep;
#define tusb_dma_omap() 0
#endif
+/* Anomaly 05000456 - USB Receive Interrupt Is Not Generated in DMA Mode 1
+ * Only allow DMA mode 1 to be used when the USB will actually generate the
+ * interrupts we expect.
+ */
+#ifdef CONFIG_BLACKFIN
+# undef USE_MODE1
+# if !ANOMALY_05000456
+# define USE_MODE1
+# endif
+#endif
+
/*
* DMA channel status ... updated by the dma controller driver whenever that
* status changes, and protected by the overall controller spinlock.
@@ -148,8 +159,6 @@ dma_channel_status(struct dma_channel *c)
* Controllers manage dma channels.
*/
struct dma_controller {
- int (*start)(struct dma_controller *);
- int (*stop)(struct dma_controller *);
struct dma_channel *(*channel_alloc)(struct dma_controller *,
struct musb_hw_ep *, u8 is_tx);
void (*channel_release)(struct dma_channel *);
@@ -158,15 +167,28 @@ struct dma_controller {
dma_addr_t dma_addr,
u32 length);
int (*channel_abort)(struct dma_channel *);
+ int (*is_compatible)(struct dma_channel *channel,
+ u16 maxpacket,
+ void *buf, u32 length);
};
/* called after channel_program(), may indicate a fault */
extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
+#ifdef CONFIG_MUSB_PIO_ONLY
+static inline struct dma_controller *dma_controller_create(struct musb *m,
+ void __iomem *io)
+{
+ return NULL;
+}
+
+static inline void dma_controller_destroy(struct dma_controller *d) { }
-extern struct dma_controller *__init
-dma_controller_create(struct musb *, void __iomem *);
+#else
+
+extern struct dma_controller *dma_controller_create(struct musb *, void __iomem *);
extern void dma_controller_destroy(struct dma_controller *);
+#endif
#endif /* __MUSB_DMA_H__ */
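
A small sketch of how the core side can consult the new optional is_compatible() hook before committing a request to DMA; when a controller leaves the hook NULL, the default is to allow DMA. The helper name check_dma_ok() is invented, but the gadget-side map_dma_buffer() added later in this diff follows the same pattern.

/* Sketch only: check_dma_ok() is hypothetical, built on the hook above. */
static bool check_dma_ok(struct dma_controller *dma, struct dma_channel *ch,
			 u16 maxpacket, void *buf, u32 length)
{
	if (!dma->is_compatible)	/* hook is optional */
		return true;

	return dma->is_compatible(ch, maxpacket, buf, length) != 0;
}
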
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
new file mode 100644
index 00000000000..09529f94e72
--- /dev/null
+++ b/drivers/usb/musb/musb_dsps.c
@@ -0,0 +1,830 @@
+/*
+ * Texas Instruments DSPS platforms "glue layer"
+ *
+ * Copyright (C) 2012, by Texas Instruments
+ *
+ * Based on the am35x "glue layer" code.
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ * musb_dsps.c will be a common file for all the TI DSPS platforms
+ * such as dm64x, dm36x, dm35x, da8x, am35x and ti81x.
+ * For now only ti81x is using this and in future davinci.c, am35x.c
+ * da8xx.c would be merged to this file after testing.
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/usb/usb_phy_generic.h>
+#include <linux/platform_data/usb-omap.h>
+#include <linux/sizes.h>
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/usb/of.h>
+
+#include <linux/debugfs.h>
+
+#include "musb_core.h"
+
+static const struct of_device_id musb_dsps_of_match[];
+
+/**
+ * avoid using musb_readx()/musb_writex() as glue layer should not be
+ * dependent on musb core layer symbols.
+ */
+static inline u8 dsps_readb(const void __iomem *addr, unsigned offset)
+ { return __raw_readb(addr + offset); }
+
+static inline u32 dsps_readl(const void __iomem *addr, unsigned offset)
+ { return __raw_readl(addr + offset); }
+
+static inline void dsps_writeb(void __iomem *addr, unsigned offset, u8 data)
+ { __raw_writeb(data, addr + offset); }
+
+static inline void dsps_writel(void __iomem *addr, unsigned offset, u32 data)
+ { __raw_writel(data, addr + offset); }
+
+/**
+ * DSPS musb wrapper register offset.
+ * FIXME: This should be expanded to have all the wrapper registers from TI DSPS
+ * musb ips.
+ */
+struct dsps_musb_wrapper {
+ u16 revision;
+ u16 control;
+ u16 status;
+ u16 epintr_set;
+ u16 epintr_clear;
+ u16 epintr_status;
+ u16 coreintr_set;
+ u16 coreintr_clear;
+ u16 coreintr_status;
+ u16 phy_utmi;
+ u16 mode;
+ u16 tx_mode;
+ u16 rx_mode;
+
+ /* bit positions for control */
+ unsigned reset:5;
+
+ /* bit positions for interrupt */
+ unsigned usb_shift:5;
+ u32 usb_mask;
+ u32 usb_bitmap;
+ unsigned drvvbus:5;
+
+ unsigned txep_shift:5;
+ u32 txep_mask;
+ u32 txep_bitmap;
+
+ unsigned rxep_shift:5;
+ u32 rxep_mask;
+ u32 rxep_bitmap;
+
+ /* bit positions for phy_utmi */
+ unsigned otg_disable:5;
+
+ /* bit positions for mode */
+ unsigned iddig:5;
+ unsigned iddig_mux:5;
+ /* miscellaneous stuff */
+ u8 poll_seconds;
+};
+
+/*
+ * register shadow for suspend
+ */
+struct dsps_context {
+ u32 control;
+ u32 epintr;
+ u32 coreintr;
+ u32 phy_utmi;
+ u32 mode;
+ u32 tx_mode;
+ u32 rx_mode;
+};
+
+/**
+ * DSPS glue structure.
+ */
+struct dsps_glue {
+ struct device *dev;
+ struct platform_device *musb; /* child musb pdev */
+ const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */
+ struct timer_list timer; /* otg_workaround timer */
+ unsigned long last_timer; /* last timer data for each instance */
+
+ struct dsps_context context;
+ struct debugfs_regset32 regset;
+ struct dentry *dbgfs_root;
+};
+
+static const struct debugfs_reg32 dsps_musb_regs[] = {
+ { "revision", 0x00 },
+ { "control", 0x14 },
+ { "status", 0x18 },
+ { "eoi", 0x24 },
+ { "intr0_stat", 0x30 },
+ { "intr1_stat", 0x34 },
+ { "intr0_set", 0x38 },
+ { "intr1_set", 0x3c },
+ { "txmode", 0x70 },
+ { "rxmode", 0x74 },
+ { "autoreq", 0xd0 },
+ { "srpfixtime", 0xd4 },
+ { "tdown", 0xd8 },
+ { "phy_utmi", 0xe0 },
+ { "mode", 0xe8 },
+};
+
+static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || (musb->a_wait_bcon == 0 &&
+ musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->state));
+ del_timer(&glue->timer);
+ glue->last_timer = jiffies;
+ return;
+ }
+ if (musb->port_mode != MUSB_PORT_MODE_DUAL_ROLE)
+ return;
+
+ if (!musb->g.dev.driver)
+ return;
+
+ if (time_after(glue->last_timer, timeout) &&
+ timer_pending(&glue->timer)) {
+ dev_dbg(musb->controller,
+ "Longer idle timer already pending, ignoring...\n");
+ return;
+ }
+ glue->last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
+ usb_otg_state_string(musb->xceiv->state),
+ jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&glue->timer, timeout);
+}
+
+/**
+ * dsps_musb_enable - enable interrupts
+ */
+static void dsps_musb_enable(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev->parent);
+ struct dsps_glue *glue = platform_get_drvdata(pdev);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ void __iomem *reg_base = musb->ctrl_base;
+ u32 epmask, coremask;
+
+ /* Workaround: setup IRQs through both register sets. */
+ epmask = ((musb->epmask & wrp->txep_mask) << wrp->txep_shift) |
+ ((musb->epmask & wrp->rxep_mask) << wrp->rxep_shift);
+ coremask = (wrp->usb_bitmap & ~MUSB_INTR_SOF);
+
+ dsps_writel(reg_base, wrp->epintr_set, epmask);
+ dsps_writel(reg_base, wrp->coreintr_set, coremask);
+ /* Force the DRVVBUS IRQ so we can start polling for ID change. */
+ dsps_writel(reg_base, wrp->coreintr_set,
+ (1 << wrp->drvvbus) << wrp->usb_shift);
+ dsps_musb_try_idle(musb, 0);
+}
+
+/**
+ * dsps_musb_disable - disable HDRC and flush interrupts
+ */
+static void dsps_musb_disable(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev->parent);
+ struct dsps_glue *glue = platform_get_drvdata(pdev);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ void __iomem *reg_base = musb->ctrl_base;
+
+ dsps_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
+ dsps_writel(reg_base, wrp->epintr_clear,
+ wrp->txep_bitmap | wrp->rxep_bitmap);
+ dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
+}
+
+static void otg_timer(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ void __iomem *mregs = musb->mregs;
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ u8 devctl;
+ unsigned long flags;
+ int skip_session = 0;
+
+ /*
+ * We poll because DSPS IP's won't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = dsps_readb(mregs, MUSB_DEVCTL);
+ dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->state));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ skip_session = 1;
+ /* fall */
+
+ case OTG_STATE_A_IDLE:
+ case OTG_STATE_B_IDLE:
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
+ dsps_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+ mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ dsps_writel(musb->ctrl_base, wrp->coreintr_set,
+ MUSB_INTR_VBUSERROR << wrp->usb_shift);
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static irqreturn_t dsps_interrupt(int irq, void *hci)
+{
+ struct musb *musb = hci;
+ void __iomem *reg_base = musb->ctrl_base;
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ u32 epintr, usbintr;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* Get endpoint interrupts */
+ epintr = dsps_readl(reg_base, wrp->epintr_status);
+ musb->int_rx = (epintr & wrp->rxep_bitmap) >> wrp->rxep_shift;
+ musb->int_tx = (epintr & wrp->txep_bitmap) >> wrp->txep_shift;
+
+ if (epintr)
+ dsps_writel(reg_base, wrp->epintr_status, epintr);
+
+ /* Get usb core interrupts */
+ usbintr = dsps_readl(reg_base, wrp->coreintr_status);
+ if (!usbintr && !epintr)
+ goto out;
+
+ musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift;
+ if (usbintr)
+ dsps_writel(reg_base, wrp->coreintr_status, usbintr);
+
+ dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n",
+ usbintr, epintr);
+ /*
+ * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
+ * DSPS IP's missing ID change IRQ. We need an ID change IRQ to
+ * switch appropriately between halves of the OTG state machine.
+ * Managing DEVCTL.SESSION per Mentor docs requires that we know its
+ * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
+ * Also, DRVVBUS pulses for SRP (but not at 5V) ...
+ */
+ if (is_host_active(musb) && usbintr & MUSB_INTR_BABBLE) {
+ pr_info("CAUTION: musb: Babble Interrupt Occurred\n");
+
+ /*
+ * When a babble condition occurs, the musb controller removes
+ * the session and is no longer in host mode. Hence, all
+ * devices connected to its root hub get disconnected.
+ *
+ * Hand this error down to the musb core isr, so it can
+ * recover.
+ */
+ musb->int_usb = MUSB_INTR_BABBLE | MUSB_INTR_DISCONNECT;
+ musb->int_tx = musb->int_rx = 0;
+ }
+
+ if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) {
+ int drvvbus = dsps_readl(reg_base, wrp->status);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = dsps_readb(mregs, MUSB_DEVCTL);
+ int err;
+
+ err = musb->int_usb & MUSB_INTR_VBUSERROR;
+ if (err) {
+ /*
+ * The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"-starting sessions
+ * without waiting for VBUS to stop registering in
+ * devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
+ mod_timer(&glue->timer,
+ jiffies + wrp->poll_seconds * HZ);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (drvvbus) {
+ MUSB_HST_MODE(musb);
+ musb->xceiv->otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ del_timer(&glue->timer);
+ } else {
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ musb->xceiv->otg->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ }
+
+ /* NOTE: this must complete power-on within 100 ms. */
+ dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ usb_otg_state_string(musb->xceiv->state),
+ err ? " ERROR" : "",
+ devctl);
+ ret = IRQ_HANDLED;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ ret |= musb_interrupt(musb);
+
+ /* Poll for ID change in OTG port mode */
+ if (musb->xceiv->state == OTG_STATE_B_IDLE &&
+ musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+ mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
+out:
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static int dsps_musb_dbg_init(struct musb *musb, struct dsps_glue *glue)
+{
+ struct dentry *root;
+ struct dentry *file;
+ char buf[128];
+
+ sprintf(buf, "%s.dsps", dev_name(musb->controller));
+ root = debugfs_create_dir(buf, NULL);
+ if (!root)
+ return -ENOMEM;
+ glue->dbgfs_root = root;
+
+ glue->regset.regs = dsps_musb_regs;
+ glue->regset.nregs = ARRAY_SIZE(dsps_musb_regs);
+ glue->regset.base = musb->ctrl_base;
+
+ file = debugfs_create_regset32("regdump", S_IRUGO, root, &glue->regset);
+ if (!file) {
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int dsps_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ struct platform_device *parent = to_platform_device(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ void __iomem *reg_base;
+ struct resource *r;
+ u32 rev, val;
+ int ret;
+
+ r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control");
+ if (!r)
+ return -EINVAL;
+
+ reg_base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+ musb->ctrl_base = reg_base;
+
+ /* NOP driver needs change if supporting dual instance */
+ musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+ if (IS_ERR(musb->xceiv))
+ return PTR_ERR(musb->xceiv);
+
+ /* Returns zero if e.g. not clocked */
+ rev = dsps_readl(reg_base, wrp->revision);
+ if (!rev)
+ return -ENODEV;
+
+ usb_phy_init(musb->xceiv);
+ setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
+
+ /* Reset the musb */
+ dsps_writel(reg_base, wrp->control, (1 << wrp->reset));
+
+ musb->isr = dsps_interrupt;
+
+ /* reset the otgdisable bit, needed for host mode to work */
+ val = dsps_readl(reg_base, wrp->phy_utmi);
+ val &= ~(1 << wrp->otg_disable);
+ dsps_writel(musb->ctrl_base, wrp->phy_utmi, val);
+
+ ret = dsps_musb_dbg_init(musb, glue);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int dsps_musb_exit(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+
+ del_timer_sync(&glue->timer);
+ usb_phy_shutdown(musb->xceiv);
+ debugfs_remove_recursive(glue->dbgfs_root);
+
+ return 0;
+}
+
+static int dsps_musb_set_mode(struct musb *musb, u8 mode)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ void __iomem *ctrl_base = musb->ctrl_base;
+ u32 reg;
+
+ reg = dsps_readl(ctrl_base, wrp->mode);
+
+ switch (mode) {
+ case MUSB_HOST:
+ reg &= ~(1 << wrp->iddig);
+
+ /*
+ * if we're setting mode to host-only or device-only, we're
+ * going to ignore whatever the PHY sends us and just force
+ * ID pin status by SW
+ */
+ reg |= (1 << wrp->iddig_mux);
+
+ dsps_writel(ctrl_base, wrp->mode, reg);
+ dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
+ break;
+ case MUSB_PERIPHERAL:
+ reg |= (1 << wrp->iddig);
+
+ /*
+ * if we're setting mode to host-only or device-only, we're
+ * going to ignore whatever the PHY sends us and just force
+ * ID pin status by SW
+ */
+ reg |= (1 << wrp->iddig_mux);
+
+ dsps_writel(ctrl_base, wrp->mode, reg);
+ break;
+ case MUSB_OTG:
+ dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
+ break;
+ default:
+ dev_err(glue->dev, "unsupported mode %d\n", mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dsps_musb_reset(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+
+ dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset));
+ udelay(100);
+}
+
+static struct musb_platform_ops dsps_ops = {
+ .init = dsps_musb_init,
+ .exit = dsps_musb_exit,
+
+ .enable = dsps_musb_enable,
+ .disable = dsps_musb_disable,
+
+ .try_idle = dsps_musb_try_idle,
+ .set_mode = dsps_musb_set_mode,
+ .reset = dsps_musb_reset,
+};
+
+static u64 musb_dmamask = DMA_BIT_MASK(32);
+
+static int get_int_prop(struct device_node *dn, const char *s)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(dn, s, &val);
+ if (ret)
+ return 0;
+ return val;
+}
+
+static int get_musb_port_mode(struct device *dev)
+{
+ enum usb_dr_mode mode;
+
+ mode = of_usb_get_dr_mode(dev->of_node);
+ switch (mode) {
+ case USB_DR_MODE_HOST:
+ return MUSB_PORT_MODE_HOST;
+
+ case USB_DR_MODE_PERIPHERAL:
+ return MUSB_PORT_MODE_GADGET;
+
+ case USB_DR_MODE_UNKNOWN:
+ case USB_DR_MODE_OTG:
+ default:
+ return MUSB_PORT_MODE_DUAL_ROLE;
+ }
+}
+
+static int dsps_create_musb_pdev(struct dsps_glue *glue,
+ struct platform_device *parent)
+{
+ struct musb_hdrc_platform_data pdata;
+ struct resource resources[2];
+ struct resource *res;
+ struct device *dev = &parent->dev;
+ struct musb_hdrc_config *config;
+ struct platform_device *musb;
+ struct device_node *dn = parent->dev.of_node;
+ int ret;
+
+ memset(resources, 0, sizeof(resources));
+ res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc");
+ if (!res) {
+ dev_err(dev, "failed to get memory.\n");
+ return -EINVAL;
+ }
+ resources[0] = *res;
+
+ res = platform_get_resource_byname(parent, IORESOURCE_IRQ, "mc");
+ if (!res) {
+ dev_err(dev, "failed to get irq.\n");
+ return -EINVAL;
+ }
+ resources[1] = *res;
+
+ /* allocate the child platform device */
+ musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb) {
+ dev_err(dev, "failed to allocate musb device\n");
+ return -ENOMEM;
+ }
+
+ musb->dev.parent = dev;
+ musb->dev.dma_mask = &musb_dmamask;
+ musb->dev.coherent_dma_mask = musb_dmamask;
+ musb->dev.of_node = of_node_get(dn);
+
+ glue->musb = musb;
+
+ ret = platform_device_add_resources(musb, resources,
+ ARRAY_SIZE(resources));
+ if (ret) {
+ dev_err(dev, "failed to add resources\n");
+ goto err;
+ }
+
+ config = devm_kzalloc(&parent->dev, sizeof(*config), GFP_KERNEL);
+ if (!config) {
+ dev_err(dev, "failed to allocate musb hdrc config\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ pdata.config = config;
+ pdata.platform_ops = &dsps_ops;
+
+ config->num_eps = get_int_prop(dn, "mentor,num-eps");
+ config->ram_bits = get_int_prop(dn, "mentor,ram-bits");
+ config->host_port_deassert_reset_at_resume = 1;
+ pdata.mode = get_musb_port_mode(dev);
+ /* DT keeps this entry in mA, musb expects it as per USB spec */
+ pdata.power = get_int_prop(dn, "mentor,power") / 2;
+ config->multipoint = of_property_read_bool(dn, "mentor,multipoint");
+
+ ret = platform_device_add_data(musb, &pdata, sizeof(pdata));
+ if (ret) {
+ dev_err(dev, "failed to add platform_data\n");
+ goto err;
+ }
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(dev, "failed to register musb device\n");
+ goto err;
+ }
+ return 0;
+
+err:
+ platform_device_put(musb);
+ return ret;
+}
+
+static int dsps_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct dsps_musb_wrapper *wrp;
+ struct dsps_glue *glue;
+ int ret;
+
+ if (!strcmp(pdev->name, "musb-hdrc"))
+ return -ENODEV;
+
+ match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
+ if (!match) {
+ dev_err(&pdev->dev, "fail to get matching of_match struct\n");
+ return -EINVAL;
+ }
+ wrp = match->data;
+
+ /* allocate glue */
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&pdev->dev, "unable to allocate glue memory\n");
+ return -ENOMEM;
+ }
+
+ glue->dev = &pdev->dev;
+ glue->wrp = wrp;
+
+ platform_set_drvdata(pdev, glue);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
+ goto err2;
+ }
+
+ ret = dsps_create_musb_pdev(glue, pdev);
+ if (ret)
+ goto err3;
+
+ return 0;
+
+err3:
+ pm_runtime_put(&pdev->dev);
+err2:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int dsps_remove(struct platform_device *pdev)
+{
+ struct dsps_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+
+ /* disable usbss clocks */
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dsps_musb_wrapper am33xx_driver_data = {
+ .revision = 0x00,
+ .control = 0x14,
+ .status = 0x18,
+ .epintr_set = 0x38,
+ .epintr_clear = 0x40,
+ .epintr_status = 0x30,
+ .coreintr_set = 0x3c,
+ .coreintr_clear = 0x44,
+ .coreintr_status = 0x34,
+ .phy_utmi = 0xe0,
+ .mode = 0xe8,
+ .tx_mode = 0x70,
+ .rx_mode = 0x74,
+ .reset = 0,
+ .otg_disable = 21,
+ .iddig = 8,
+ .iddig_mux = 7,
+ .usb_shift = 0,
+ .usb_mask = 0x1ff,
+ .usb_bitmap = (0x1ff << 0),
+ .drvvbus = 8,
+ .txep_shift = 0,
+ .txep_mask = 0xffff,
+ .txep_bitmap = (0xffff << 0),
+ .rxep_shift = 16,
+ .rxep_mask = 0xfffe,
+ .rxep_bitmap = (0xfffe << 16),
+ .poll_seconds = 2,
+};
+
+static const struct of_device_id musb_dsps_of_match[] = {
+ { .compatible = "ti,musb-am33xx",
+ .data = (void *) &am33xx_driver_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, musb_dsps_of_match);
+
+#ifdef CONFIG_PM_SLEEP
+static int dsps_suspend(struct device *dev)
+{
+ struct dsps_glue *glue = dev_get_drvdata(dev);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ struct musb *musb = platform_get_drvdata(glue->musb);
+ void __iomem *mbase = musb->ctrl_base;
+
+ glue->context.control = dsps_readl(mbase, wrp->control);
+ glue->context.epintr = dsps_readl(mbase, wrp->epintr_set);
+ glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set);
+ glue->context.phy_utmi = dsps_readl(mbase, wrp->phy_utmi);
+ glue->context.mode = dsps_readl(mbase, wrp->mode);
+ glue->context.tx_mode = dsps_readl(mbase, wrp->tx_mode);
+ glue->context.rx_mode = dsps_readl(mbase, wrp->rx_mode);
+
+ return 0;
+}
+
+static int dsps_resume(struct device *dev)
+{
+ struct dsps_glue *glue = dev_get_drvdata(dev);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+ struct musb *musb = platform_get_drvdata(glue->musb);
+ void __iomem *mbase = musb->ctrl_base;
+
+ dsps_writel(mbase, wrp->control, glue->context.control);
+ dsps_writel(mbase, wrp->epintr_set, glue->context.epintr);
+ dsps_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
+ dsps_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
+ dsps_writel(mbase, wrp->mode, glue->context.mode);
+ dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
+ dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dsps_pm_ops, dsps_suspend, dsps_resume);
+
+static struct platform_driver dsps_usbss_driver = {
+ .probe = dsps_probe,
+ .remove = dsps_remove,
+ .driver = {
+ .name = "musb-dsps",
+ .pm = &dsps_pm_ops,
+ .of_match_table = musb_dsps_of_match,
+ },
+};
+
+MODULE_DESCRIPTION("TI DSPS MUSB Glue Layer");
+MODULE_AUTHOR("Ravi B <ravibabu@ti.com>");
+MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
+MODULE_LICENSE("GPL v2");
+
+module_platform_driver(dsps_usbss_driver);
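
To make the am33xx bitmap fields above concrete, here is a small stand-alone illustration (not from the patch) of how a raw wrapper epintr status word decodes into the MUSB TX/RX endpoint masks via txep_bitmap, rxep_bitmap and the 16-bit rxep_shift; the sample interrupt value is invented.

/* Illustration only: decode an invented epintr value with the am33xx masks. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t txep_bitmap = 0xffffu << 0;	/* TX EPs in bits 0..15  */
	const uint32_t rxep_bitmap = 0xfffeu << 16;	/* RX EPs in bits 17..31 */
	uint32_t epintr = (1u << 1) | (1u << 18);	/* TX EP1 and RX EP2 fired */

	uint16_t int_tx = (epintr & txep_bitmap) >> 0;
	uint16_t int_rx = (epintr & rxep_bitmap) >> 16;

	printf("int_tx=%#x int_rx=%#x\n", int_tx, int_rx);	/* 0x2 and 0x4 */
	return 0;
}
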
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index c7ebd0867fc..d4aa779339f 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -4,6 +4,7 @@
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -39,57 +40,102 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
-#include <linux/moduleparam.h>
-#include <linux/stat.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include "musb_core.h"
-/* MUSB PERIPHERAL status 3-mar-2006:
- *
- * - EP0 seems solid. It passes both USBCV and usbtest control cases.
- * Minor glitches:
- *
- * + remote wakeup to Linux hosts work, but saw USBCV failures;
- * in one test run (operator error?)
- * + endpoint halt tests -- in both usbtest and usbcv -- seem
- * to break when dma is enabled ... is something wrongly
- * clearing SENDSTALL?
- *
- * - Mass storage behaved ok when last tested. Network traffic patterns
- * (with lots of short transfers etc) need retesting; they turn up the
- * worst cases of the DMA, since short packets are typical but are not
- * required.
- *
- * - TX/IN
- * + both pio and dma behave in with network and g_zero tests
- * + no cppi throughput issues other than no-hw-queueing
- * + failed with FLAT_REG (DaVinci)
- * + seems to behave with double buffering, PIO -and- CPPI
- * + with gadgetfs + AIO, requests got lost?
- *
- * - RX/OUT
- * + both pio and dma behave in with network and g_zero tests
- * + dma is slow in typical case (short_not_ok is clear)
- * + double buffering ok with PIO
- * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
- * + request lossage observed with gadgetfs
- *
- * - ISO not tested ... might work, but only weakly isochronous
- *
- * - Gadget driver disabling of softconnect during bind() is ignored; so
- * drivers can't hold off host requests until userspace is ready.
- * (Workaround: they can turn it off later.)
- *
- * - PORTABILITY (assumes PIO works):
- * + DaVinci, basically works with cppi dma
- * + OMAP 2430, ditto with mentor dma
- * + TUSB 6010, platform-specific dma in the works
- */
-
/* ----------------------------------------------------------------------- */
+#define is_buffer_mapped(req) (is_dma_capable() && \
+ (req->map_state != UN_MAPPED))
+
+/* Maps the buffer to dma */
+
+static inline void map_dma_buffer(struct musb_request *request,
+ struct musb *musb, struct musb_ep *musb_ep)
+{
+ int compatible = true;
+ struct dma_controller *dma = musb->dma_controller;
+
+ request->map_state = UN_MAPPED;
+
+ if (!is_dma_capable() || !musb_ep->dma)
+ return;
+
+ /* Check if DMA engine can handle this request.
+ * DMA code must reject the USB request explicitly.
+ * Default behaviour is to map the request.
+ */
+ if (dma->is_compatible)
+ compatible = dma->is_compatible(musb_ep->dma,
+ musb_ep->packet_sz, request->request.buf,
+ request->request.length);
+ if (!compatible)
+ return;
+
+ if (request->request.dma == DMA_ADDR_INVALID) {
+ dma_addr_t dma_addr;
+ int ret;
+
+ dma_addr = dma_map_single(
+ musb->controller,
+ request->request.buf,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ ret = dma_mapping_error(musb->controller, dma_addr);
+ if (ret)
+ return;
+
+ request->request.dma = dma_addr;
+ request->map_state = MUSB_MAPPED;
+ } else {
+ dma_sync_single_for_device(musb->controller,
+ request->request.dma,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->map_state = PRE_MAPPED;
+ }
+}
+
+/* Unmap the buffer from dma and maps it back to cpu */
+static inline void unmap_dma_buffer(struct musb_request *request,
+ struct musb *musb)
+{
+ struct musb_ep *musb_ep = request->ep;
+
+ if (!is_buffer_mapped(request) || !musb_ep->dma)
+ return;
+
+ if (request->request.dma == DMA_ADDR_INVALID) {
+ dev_vdbg(musb->controller,
+ "not unmapping a never mapped buffer\n");
+ return;
+ }
+ if (request->map_state == MUSB_MAPPED) {
+ dma_unmap_single(musb->controller,
+ request->request.dma,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->request.dma = DMA_ADDR_INVALID;
+ } else { /* PRE_MAPPED */
+ dma_sync_single_for_cpu(musb->controller,
+ request->request.dma,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ }
+ request->map_state = UN_MAPPED;
+}
+
/*
* Immediately complete a request.
*
@@ -110,37 +156,23 @@ __acquires(ep->musb->lock)
req = to_musb_request(request);
- list_del(&request->list);
+ list_del(&req->list);
if (req->request.status == -EINPROGRESS)
req->request.status = status;
musb = req->musb;
ep->busy = 1;
spin_unlock(&musb->lock);
- if (is_dma_capable()) {
- if (req->mapped) {
- dma_unmap_single(musb->controller,
- req->request.dma,
- req->request.length,
- req->tx
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- req->request.dma = DMA_ADDR_INVALID;
- req->mapped = 0;
- } else if (req->request.dma != DMA_ADDR_INVALID)
- dma_sync_single_for_cpu(musb->controller,
- req->request.dma,
- req->request.length,
- req->tx
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
+
+ if (!dma_mapping_error(&musb->g.dev, request->dma))
+ unmap_dma_buffer(req, musb);
+
if (request->status == 0)
- DBG(5, "%s done request %p, %d/%d\n",
+ dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
ep->end_point.name, request,
req->request.actual, req->request.length);
else
- DBG(2, "%s request %p, %d/%d fault %d\n",
+ dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
ep->end_point.name, request,
req->request.actual, req->request.length,
request->status);
@@ -157,6 +189,7 @@ __acquires(ep->musb->lock)
*/
static void nuke(struct musb_ep *ep, const int status)
{
+ struct musb *musb = ep->musb;
struct musb_request *req = NULL;
void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
@@ -165,9 +198,15 @@ static void nuke(struct musb_ep *ep, const int status)
if (is_dma_capable() && ep->dma) {
struct dma_controller *c = ep->musb->dma_controller;
int value;
+
if (ep->is_in) {
+ /*
+ * The programming guide says that we must not clear
+ * the DMAMODE bit before DMAENAB, so we only
+ * clear it in the second write...
+ */
musb_writew(epio, MUSB_TXCSR,
- 0 | MUSB_TXCSR_FLUSHFIFO);
+ MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
musb_writew(epio, MUSB_TXCSR,
0 | MUSB_TXCSR_FLUSHFIFO);
} else {
@@ -178,14 +217,14 @@ static void nuke(struct musb_ep *ep, const int status)
}
value = c->channel_abort(ep->dma);
- DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
+ dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
+ ep->name, value);
c->channel_release(ep->dma);
ep->dma = NULL;
}
- while (!list_empty(&(ep->req_list))) {
- req = container_of(ep->req_list.next, struct musb_request,
- request.list);
+ while (!list_empty(&ep->req_list)) {
+ req = list_first_entry(&ep->req_list, struct musb_request, list);
musb_g_giveback(ep, &req->request, status);
}
}
@@ -207,41 +246,6 @@ static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
return ep->packet_sz;
}
-
-#ifdef CONFIG_USB_INVENTRA_DMA
-
-/* Peripheral tx (IN) using Mentor DMA works as follows:
- Only mode 0 is used for transfers <= wPktSize,
- mode 1 is used for larger transfers,
-
- One of the following happens:
- - Host sends IN token which causes an endpoint interrupt
- -> TxAvail
- -> if DMA is currently busy, exit.
- -> if queue is non-empty, txstate().
-
- - Request is queued by the gadget driver.
- -> if queue was previously empty, txstate()
-
- txstate()
- -> start
- /\ -> setup DMA
- | (data is transferred to the FIFO, then sent out when
- | IN token(s) are recd from Host.
- | -> DMA interrupt on completion
- | calls TxAvail.
- | -> stop DMA, ~DmaEenab,
- | -> set TxPktRdy for last short pkt or zlp
- | -> Complete Request
- | -> Continue next request (call txstate)
- |___________________________________|
-
- * Non-Mentor DMA engines can of course work differently, such as by
- * upleveling from irq-per-packet to irq-per-buffer.
- */
-
-#endif
-
/*
* An endpoint is transmitting data. This can be called either from
* the IRQ routine or from ep.queue() to kickstart a request on an
@@ -260,9 +264,16 @@ static void txstate(struct musb *musb, struct musb_request *req)
musb_ep = req->ep;
+ /* Check if EP is disabled */
+ if (!musb_ep->desc) {
+ dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+ musb_ep->end_point.name);
+ return;
+ }
+
/* we shouldn't get here while DMA is active ... but we do ... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
- DBG(4, "dma pending...\n");
+ dev_dbg(musb->controller, "dma pending...\n");
return;
}
@@ -274,37 +285,37 @@ static void txstate(struct musb *musb, struct musb_request *req)
(int)(request->length - request->actual));
if (csr & MUSB_TXCSR_TXPKTRDY) {
- DBG(5, "%s old packet still ready , txcsr %03x\n",
+ dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
musb_ep->end_point.name, csr);
return;
}
if (csr & MUSB_TXCSR_P_SENDSTALL) {
- DBG(5, "%s stalling, txcsr %03x\n",
+ dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
musb_ep->end_point.name, csr);
return;
}
- DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+ dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
epnum, musb_ep->packet_sz, fifo_count,
csr);
#ifndef CONFIG_MUSB_PIO_ONLY
- if (is_dma_capable() && musb_ep->dma) {
+ if (is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
+ size_t request_size;
+
+ /* setup DMA, then program endpoint CSR */
+ request_size = min_t(size_t, request->length - request->actual,
+ musb_ep->dma->max_len);
- use_dma = (request->dma != DMA_ADDR_INVALID);
+ use_dma = (request->dma != DMA_ADDR_INVALID && request_size);
/* MUSB_TXCSR_P_ISO is still set correctly */
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
{
- size_t request_size;
-
- /* setup DMA, then program endpoint CSR */
- request_size = min(request->length,
- musb_ep->dma->max_len);
- if (request_size <= musb_ep->packet_sz)
+ if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else
musb_ep->dma->desired_mode = 1;
@@ -312,73 +323,101 @@ static void txstate(struct musb *musb, struct musb_request *req)
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
musb_ep->dma->desired_mode,
- request->dma, request_size);
+ request->dma + request->actual, request_size);
if (use_dma) {
if (musb_ep->dma->desired_mode == 0) {
- /* ASSERT: DMAENAB is clear */
- csr &= ~(MUSB_TXCSR_AUTOSET |
- MUSB_TXCSR_DMAMODE);
+ /*
+ * We must not clear the DMAMODE bit
+ * before the DMAENAB bit -- and the
+ * latter doesn't always get cleared
+ * before we get here...
+ */
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB);
+ musb_writew(epio, MUSB_TXCSR, csr
+ | MUSB_TXCSR_P_WZC_BITS);
+ csr &= ~MUSB_TXCSR_DMAMODE;
csr |= (MUSB_TXCSR_DMAENAB |
MUSB_TXCSR_MODE);
/* against programming guide */
- } else
- csr |= (MUSB_TXCSR_AUTOSET
- | MUSB_TXCSR_DMAENAB
+ } else {
+ csr |= (MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_DMAMODE
| MUSB_TXCSR_MODE);
-
+ /*
+ * Enable Autoset according to table
+ * below
+ * bulk_split hb_mult Autoset_Enable
+ * 0 0 Yes(Normal)
+ * 0 >0 No(High BW ISO)
+ * 1 0 Yes(HS bulk)
+ * 1 >0 Yes(FS bulk)
+ */
+ if (!musb_ep->hb_mult ||
+ (musb_ep->hb_mult &&
+ can_bulk_split(musb,
+ musb_ep->type)))
+ csr |= MUSB_TXCSR_AUTOSET;
+ }
csr &= ~MUSB_TXCSR_P_UNDERRUN;
+
musb_writew(epio, MUSB_TXCSR, csr);
}
}
-#elif defined(CONFIG_USB_TI_CPPI_DMA)
- /* program endpoint CSR first, then setup DMA */
- csr &= ~(MUSB_TXCSR_AUTOSET
- | MUSB_TXCSR_DMAMODE
- | MUSB_TXCSR_P_UNDERRUN
- | MUSB_TXCSR_TXPKTRDY);
- csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
- musb_writew(epio, MUSB_TXCSR,
- (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
- | csr);
-
- /* ensure writebuffer is empty */
- csr = musb_readw(epio, MUSB_TXCSR);
-
- /* NOTE host side sets DMAENAB later than this; both are
- * OK since the transfer dma glue (between CPPI and Mentor
- * fifos) just tells CPPI it could start. Data only moves
- * to the USB TX fifo when both fifos are ready.
- */
-
- /* "mode" is irrelevant here; handle terminating ZLPs like
- * PIO does, since the hardware RNDIS mode seems unreliable
- * except for the last-packet-is-already-short case.
- */
- use_dma = use_dma && c->channel_program(
- musb_ep->dma, musb_ep->packet_sz,
- 0,
- request->dma,
- request->length);
- if (!use_dma) {
- c->channel_release(musb_ep->dma);
- musb_ep->dma = NULL;
- /* ASSERT: DMAENAB clear */
- csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
- /* invariant: prequest->buf is non-null */
- }
-#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
- use_dma = use_dma && c->channel_program(
- musb_ep->dma, musb_ep->packet_sz,
- request->zero,
- request->dma,
- request->length);
#endif
+ if (is_cppi_enabled()) {
+ /* program endpoint CSR first, then setup DMA */
+ csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
+ csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
+ MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
+ ~MUSB_TXCSR_P_UNDERRUN) | csr);
+
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /*
+ * NOTE host side sets DMAENAB later than this; both are
+ * OK since the transfer dma glue (between CPPI and
+ * Mentor fifos) just tells CPPI it could start. Data
+ * only moves to the USB TX fifo when both fifos are
+ * ready.
+ */
+ /*
+ * "mode" is irrelevant here; handle terminating ZLPs
+ * like PIO does, since the hardware RNDIS mode seems
+ * unreliable except for the
+ * last-packet-is-already-short case.
+ */
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ 0,
+ request->dma + request->actual,
+ request_size);
+ if (!use_dma) {
+ c->channel_release(musb_ep->dma);
+ musb_ep->dma = NULL;
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* invariant: prequest->buf is non-null */
+ }
+ } else if (tusb_dma_omap())
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ request->zero,
+ request->dma + request->actual,
+ request_size);
}
#endif
if (!use_dma) {
+ /*
+ * Unmap the dma buffer back to cpu if dma channel
+ * programming fails
+ */
+ unmap_dma_buffer(req, musb);
+
musb_write_fifo(musb_ep->hw_ep, fifo_count,
(u8 *) (request->buf + request->actual));
request->actual += fifo_count;
@@ -388,7 +427,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
}
/* host may already have the data when this message shows... */
- DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+ dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
musb_ep->end_point.name, use_dma ? "dma" : "pio",
request->actual, request->length,
musb_readw(epio, MUSB_TXCSR),
@@ -403,6 +442,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
void musb_g_tx(struct musb *musb, u8 epnum)
{
u16 csr;
+ struct musb_request *req;
struct usb_request *request;
u8 __iomem *mbase = musb->mregs;
struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
@@ -410,177 +450,152 @@ void musb_g_tx(struct musb *musb, u8 epnum)
struct dma_channel *dma;
musb_ep_select(mbase, epnum);
- request = next_request(musb_ep);
+ req = next_request(musb_ep);
+ request = &req->request;
csr = musb_readw(epio, MUSB_TXCSR);
- DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+ dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
dma = is_dma_capable() ? musb_ep->dma : NULL;
- do {
- /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
- * probably rates reporting as a host error
- */
- if (csr & MUSB_TXCSR_P_SENTSTALL) {
- csr |= MUSB_TXCSR_P_WZC_BITS;
- csr &= ~MUSB_TXCSR_P_SENTSTALL;
- musb_writew(epio, MUSB_TXCSR, csr);
- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- dma->status = MUSB_DMA_STATUS_CORE_ABORT;
- musb->dma_controller->channel_abort(dma);
- }
- if (request)
- musb_g_giveback(musb_ep, request, -EPIPE);
+ /*
+ * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
+ * probably rates reporting as a host error.
+ */
+ if (csr & MUSB_TXCSR_P_SENTSTALL) {
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~MUSB_TXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ return;
+ }
- break;
- }
+ if (csr & MUSB_TXCSR_P_UNDERRUN) {
+ /* We NAKed, no big deal... little reason to care. */
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
+ epnum, request);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /*
+ * SHOULD NOT HAPPEN... has with CPPI though, after
+ * changing SENDSTALL (and other cases); harmless?
+ */
+ dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
+ return;
+ }
+
+ if (request) {
+ u8 is_dma = 0;
- if (csr & MUSB_TXCSR_P_UNDERRUN) {
- /* we NAKed, no big deal ... little reason to care */
+ if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+ is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
- csr &= ~(MUSB_TXCSR_P_UNDERRUN
- | MUSB_TXCSR_TXPKTRDY);
+ csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
+ MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
musb_writew(epio, MUSB_TXCSR, csr);
- DBG(20, "underrun on ep%d, req %p\n", epnum, request);
- }
-
- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- /* SHOULD NOT HAPPEN ... has with cppi though, after
- * changing SENDSTALL (and other cases); harmless?
- */
- DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
- break;
+ /* Ensure writebuffer is empty. */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ request->actual += musb_ep->dma->actual_len;
+ dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
+ epnum, csr, musb_ep->dma->actual_len, request);
}
- if (request) {
- u8 is_dma = 0;
-
- if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
- is_dma = 1;
- csr |= MUSB_TXCSR_P_WZC_BITS;
- csr &= ~(MUSB_TXCSR_DMAENAB
- | MUSB_TXCSR_P_UNDERRUN
- | MUSB_TXCSR_TXPKTRDY);
- musb_writew(epio, MUSB_TXCSR, csr);
- /* ensure writebuffer is empty */
- csr = musb_readw(epio, MUSB_TXCSR);
- request->actual += musb_ep->dma->actual_len;
- DBG(4, "TXCSR%d %04x, dma off, "
- "len %zu, req %p\n",
- epnum, csr,
- musb_ep->dma->actual_len,
- request);
- }
-
- if (is_dma || request->actual == request->length) {
-
- /* First, maybe a terminating short packet.
- * Some DMA engines might handle this by
- * themselves.
- */
- if ((request->zero
- && request->length
- && (request->length
- % musb_ep->packet_sz)
- == 0)
-#ifdef CONFIG_USB_INVENTRA_DMA
- || (is_dma &&
- ((!dma->desired_mode) ||
- (request->actual &
- (musb_ep->packet_sz - 1))))
+ /*
+ * First, maybe a terminating short packet. Some DMA
+ * engines might handle this by themselves.
+ */
+ if ((request->zero && request->length
+ && (request->length % musb_ep->packet_sz == 0)
+ && (request->actual == request->length))
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
+ || (is_dma && (!dma->desired_mode ||
+ (request->actual &
+ (musb_ep->packet_sz - 1))))
#endif
- ) {
- /* on dma completion, fifo may not
- * be available yet ...
- */
- if (csr & MUSB_TXCSR_TXPKTRDY)
- break;
-
- DBG(4, "sending zero pkt\n");
- musb_writew(epio, MUSB_TXCSR,
- MUSB_TXCSR_MODE
- | MUSB_TXCSR_TXPKTRDY);
- request->zero = 0;
- }
+ ) {
+ /*
+ * On DMA completion, FIFO may not be
+ * available yet...
+ */
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+ return;
- /* ... or if not, then complete it */
- musb_g_giveback(musb_ep, request, 0);
+ dev_dbg(musb->controller, "sending zero pkt\n");
+ musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
+ | MUSB_TXCSR_TXPKTRDY);
+ request->zero = 0;
+ }
- /* kickstart next transfer if appropriate;
- * the packet that just completed might not
- * be transmitted for hours or days.
- * REVISIT for double buffering...
- * FIXME revisit for stalls too...
- */
- musb_ep_select(mbase, epnum);
- csr = musb_readw(epio, MUSB_TXCSR);
- if (csr & MUSB_TXCSR_FIFONOTEMPTY)
- break;
- request = musb_ep->desc
- ? next_request(musb_ep)
- : NULL;
- if (!request) {
- DBG(4, "%s idle now\n",
- musb_ep->end_point.name);
- break;
- }
+ if (request->actual == request->length) {
+ musb_g_giveback(musb_ep, request, 0);
+ /*
+ * In the giveback function the MUSB lock is
+ * released and acquired after sometime. During
+ * this time period the INDEX register could get
+ * changed by the gadget_queue function especially
+ * on SMP systems. Reselect the INDEX to be sure
+ * we are reading/modifying the right registers
+ */
+ musb_ep_select(mbase, epnum);
+ req = musb_ep->desc ? next_request(musb_ep) : NULL;
+ if (!req) {
+ dev_dbg(musb->controller, "%s idle now\n",
+ musb_ep->end_point.name);
+ return;
}
-
- txstate(musb, to_musb_request(request));
}
- } while (0);
+ txstate(musb, req);
+ }
}
/* ------------------------------------------------------------ */
-#ifdef CONFIG_USB_INVENTRA_DMA
-
-/* Peripheral rx (OUT) using Mentor DMA works as follows:
- - Only mode 0 is used.
-
- - Request is queued by the gadget class driver.
- -> if queue was previously empty, rxstate()
-
- - Host sends OUT token which causes an endpoint interrupt
- /\ -> RxReady
- | -> if request queued, call rxstate
- | /\ -> setup DMA
- | | -> DMA interrupt on completion
- | | -> RxReady
- | | -> stop DMA
- | | -> ack the read
- | | -> if data recd = max expected
- | | by the request, or host
- | | sent a short packet,
- | | complete the request,
- | | and start the next one.
- | |_____________________________________|
- | else just wait for the host
- | to send the next OUT token.
- |__________________________________________________|
-
- * Non-Mentor DMA engines can of course work differently.
- */
-
-#endif
-
/*
* Context: controller locked, IRQs blocked, endpoint selected
*/
static void rxstate(struct musb *musb, struct musb_request *req)
{
- u16 csr = 0;
const u8 epnum = req->epnum;
struct usb_request *request = &req->request;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
- unsigned fifo_count = 0;
- u16 len = musb_ep->packet_sz;
+ unsigned len = 0;
+ u16 fifo_count;
+ u16 csr = musb_readw(epio, MUSB_RXCSR);
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+ u8 use_mode_1;
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
- csr = musb_readw(epio, MUSB_RXCSR);
+ fifo_count = musb_ep->packet_sz;
+
+ /* Check if EP is disabled */
+ if (!musb_ep->desc) {
+ dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+ musb_ep->end_point.name);
+ return;
+ }
- if (is_cppi_enabled() && musb_ep->dma) {
+ /* We shouldn't get here while DMA is active, but we do... */
+ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
+ dev_dbg(musb->controller, "DMA pending...\n");
+ return;
+ }
+
+ if (csr & MUSB_RXCSR_P_SENDSTALL) {
+ dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ if (is_cppi_enabled() && is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
@@ -608,13 +623,26 @@ static void rxstate(struct musb *musb, struct musb_request *req)
}
if (csr & MUSB_RXCSR_RXPKTRDY) {
- len = musb_readw(epio, MUSB_RXCOUNT);
+ fifo_count = musb_readw(epio, MUSB_RXCOUNT);
+
+ /*
+ * Enable Mode 1 on RX transfers only when short_not_ok flag
+ * is set. Currently short_not_ok flag is set only from
+ * file_storage and f_mass_storage drivers
+ */
+
+ if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
+ use_mode_1 = 1;
+ else
+ use_mode_1 = 0;
+
if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
- if (is_dma_capable() && musb_ep->dma) {
+ if (is_buffer_mapped(req)) {
struct dma_controller *c;
struct dma_channel *channel;
int use_dma = 0;
+ unsigned int transfer_size;
c = musb->dma_controller;
channel = musb_ep->dma;
@@ -630,7 +658,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 * most of these gadgets, end of transfer is signified either by a short packet,
* or filling the last byte of the buffer. (Sending extra data in
 * that last packet should trigger an overflow fault.) But in mode 1,
- * we don't get DMA completion interrrupt for short packets.
+ * we don't get DMA completion interrupt for short packets.
*
* Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
* to get endpoint interrupt on every DMA req, but that didn't seem
@@ -640,57 +668,111 @@ static void rxstate(struct musb *musb, struct musb_request *req)
* then becomes usable as a runtime "use mode 1" hint...
*/
- csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
- csr |= MUSB_RXCSR_AUTOCLEAR;
- /* csr |= MUSB_RXCSR_DMAMODE; */
+ /* Experimental: Mode1 works with mass storage use cases */
+ if (use_mode_1) {
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ /*
+ * this special sequence (enabling and then
+ * disabling MUSB_RXCSR_DMAMODE) is required
+ * to get DMAReq to activate
+ */
+ musb_writew(epio, MUSB_RXCSR,
+ csr | MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR, csr);
- /* this special sequence (enabling and then
- * disabling MUSB_RXCSR_DMAMODE) is required
- * to get DMAReq to activate
- */
- musb_writew(epio, MUSB_RXCSR,
- csr | MUSB_RXCSR_DMAMODE);
-#endif
- musb_writew(epio, MUSB_RXCSR, csr);
+ transfer_size = min_t(unsigned int,
+ request->length -
+ request->actual,
+ channel->max_len);
+ musb_ep->dma->desired_mode = 1;
+ } else {
+ if (!musb_ep->hb_mult &&
+ musb_ep->hw_ep->rx_double_buffered)
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ transfer_size = min(request->length - request->actual,
+ (unsigned)fifo_count);
+ musb_ep->dma->desired_mode = 0;
+ }
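/*
 * Illustrative sketch, not part of the patch: how the two branches above size
 * the Inventra RX DMA transfer (hypothetical helper, names mirror the code).
 * Mode 1 may program up to the channel limit in one go; mode 0 never programs
 * more than the packet that is already in the FIFO.
 */
static unsigned inventra_rx_transfer_size(int mode1, unsigned remaining,
					  unsigned channel_max, u16 fifo_count)
{
	if (mode1)
		return remaining < channel_max ? remaining : channel_max;
	return remaining < fifo_count ? remaining : (unsigned)fifo_count;
}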
- if (request->actual < request->length) {
- int transfer_size = 0;
-#ifdef USE_MODE1
- transfer_size = min(request->length,
+ use_dma = c->channel_program(
+ channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ request->dma
+ + request->actual,
+ transfer_size);
+
+ if (use_dma)
+ return;
+ }
+#elif defined(CONFIG_USB_UX500_DMA)
+ if ((is_buffer_mapped(req)) &&
+ (request->actual < request->length)) {
+
+ struct dma_controller *c;
+ struct dma_channel *channel;
+ unsigned int transfer_size = 0;
+
+ c = musb->dma_controller;
+ channel = musb_ep->dma;
+
+ /* In case first packet is short */
+ if (fifo_count < musb_ep->packet_sz)
+ transfer_size = fifo_count;
+ else if (request->short_not_ok)
+ transfer_size = min_t(unsigned int,
+ request->length -
+ request->actual,
channel->max_len);
-#else
- transfer_size = len;
-#endif
- if (transfer_size <= musb_ep->packet_sz)
- musb_ep->dma->desired_mode = 0;
- else
- musb_ep->dma->desired_mode = 1;
+ else
+ transfer_size = min_t(unsigned int,
+ request->length -
+ request->actual,
+ (unsigned)fifo_count);
- use_dma = c->channel_program(
- channel,
+ csr &= ~MUSB_RXCSR_DMAMODE;
+ csr |= (MUSB_RXCSR_DMAENAB |
+ MUSB_RXCSR_AUTOCLEAR);
+
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ if (transfer_size <= musb_ep->packet_sz) {
+ musb_ep->dma->desired_mode = 0;
+ } else {
+ musb_ep->dma->desired_mode = 1;
+ /* Mode must be set after DMAENAB */
+ csr |= MUSB_RXCSR_DMAMODE;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ if (c->channel_program(channel,
musb_ep->packet_sz,
channel->desired_mode,
request->dma
+ request->actual,
- transfer_size);
- }
+ transfer_size))
- if (use_dma)
return;
}
#endif /* Mentor's DMA */
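/*
 * Illustrative sketch, not part of the patch: the transfer sizing used by the
 * CONFIG_USB_UX500_DMA branch above, as a standalone helper (hypothetical
 * name).
 */
static unsigned ux500_rx_transfer_size(u16 fifo_count, u16 packet_sz,
				       int short_not_ok, unsigned remaining,
				       unsigned channel_max)
{
	if (fifo_count < packet_sz)	/* first packet is already short */
		return fifo_count;
	if (short_not_ok)		/* a large multi-packet DMA is safe */
		return remaining < channel_max ? remaining : channel_max;
	return remaining < fifo_count ? remaining : (unsigned)fifo_count;
}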
- fifo_count = request->length - request->actual;
- DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+ len = request->length - request->actual;
+ dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
musb_ep->end_point.name,
- len, fifo_count,
+ fifo_count, len,
musb_ep->packet_sz);
fifo_count = min_t(unsigned, len, fifo_count);
#ifdef CONFIG_USB_TUSB_OMAP_DMA
- if (tusb_dma_omap() && musb_ep->dma) {
+ if (tusb_dma_omap() && is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
u32 dma_addr = request->dma + request->actual;
@@ -705,6 +787,21 @@ static void rxstate(struct musb *musb, struct musb_request *req)
return;
}
#endif
+ /*
+ * Unmap the dma buffer back to cpu if dma channel
+ * programming fails. This buffer is mapped if the
+ * channel allocation is successful
+ */
+ if (is_buffer_mapped(req)) {
+ unmap_dma_buffer(req, musb);
+
+ /*
+ * Clear DMAENAB and AUTOCLEAR for the
+ * PIO mode transfer
+ */
+ csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
(request->buf + request->actual));
@@ -722,7 +819,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
}
/* reach the end or short packet detected */
- if (request->actual == request->length || len < musb_ep->packet_sz)
+ if (request->actual == request->length ||
+ fifo_count < musb_ep->packet_sz)
musb_g_giveback(musb_ep, request, 0);
}
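/*
 * Illustrative sketch, not part of the patch: the completion test just above,
 * in isolation.  An OUT request is given back either when the expected length
 * has been received or when the host ended the transfer with a short packet.
 */
static int rx_request_complete(unsigned actual, unsigned length,
			       u16 last_packet, u16 packet_sz)
{
	return actual == length || last_packet < packet_sz;
}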
@@ -732,36 +830,38 @@ static void rxstate(struct musb *musb, struct musb_request *req)
void musb_g_rx(struct musb *musb, u8 epnum)
{
u16 csr;
+ struct musb_request *req;
struct usb_request *request;
void __iomem *mbase = musb->mregs;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
musb_ep_select(mbase, epnum);
- request = next_request(musb_ep);
+ req = next_request(musb_ep);
+ if (!req)
+ return;
+
+ request = &req->request;
csr = musb_readw(epio, MUSB_RXCSR);
dma = is_dma_capable() ? musb_ep->dma : NULL;
- DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
+ dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
csr, dma ? " (dma)" : "", request);
if (csr & MUSB_RXCSR_P_SENTSTALL) {
- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- dma->status = MUSB_DMA_STATUS_CORE_ABORT;
- (void) musb->dma_controller->channel_abort(dma);
- request->actual += musb_ep->dma->actual_len;
- }
-
csr |= MUSB_RXCSR_P_WZC_BITS;
csr &= ~MUSB_RXCSR_P_SENTSTALL;
musb_writew(epio, MUSB_RXCSR, csr);
-
- if (request)
- musb_g_giveback(musb_ep, request, -EPIPE);
- goto done;
+ return;
}
if (csr & MUSB_RXCSR_P_OVERRUN) {
@@ -769,21 +869,20 @@ void musb_g_rx(struct musb *musb, u8 epnum)
csr &= ~MUSB_RXCSR_P_OVERRUN;
musb_writew(epio, MUSB_RXCSR, csr);
- DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
- if (request && request->status == -EINPROGRESS)
+ dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
+ if (request->status == -EINPROGRESS)
request->status = -EOVERFLOW;
}
if (csr & MUSB_RXCSR_INCOMPRX) {
/* REVISIT not necessarily an error */
- DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
+ dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
}
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
/* "should not happen"; likely RXPKTRDY pending for DMA */
- DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
- "%s busy, csr %04x\n",
+ dev_dbg(musb->controller, "%s busy, csr %04x\n",
musb_ep->end_point.name, csr);
- goto done;
+ return;
}
if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
@@ -795,14 +894,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
request->actual += musb_ep->dma->actual_len;
- DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
+ dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
epnum, csr,
musb_readw(epio, MUSB_RXCSR),
musb_ep->dma->actual_len, request);
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
+ defined(CONFIG_USB_UX500_DMA)
/* Autoclear doesn't clear RxPktRdy for short packets */
- if ((dma->desired_mode == 0)
+ if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
|| (dma->actual_len
& (musb_ep->packet_sz - 1))) {
/* ack the read! */
@@ -813,33 +913,38 @@ void musb_g_rx(struct musb *musb, u8 epnum)
/* incomplete, and not short? wait for next IN packet */
if ((request->actual < request->length)
&& (musb_ep->dma->actual_len
- == musb_ep->packet_sz))
- goto done;
+ == musb_ep->packet_sz)) {
+				/* In the double-buffered case, continue to unload the
+				 * FIFO if another Rx packet is already waiting there.
+				 */
+ csr = musb_readw(epio, MUSB_RXCSR);
+ if ((csr & MUSB_RXCSR_RXPKTRDY) &&
+ hw_ep->rx_double_buffered)
+ goto exit;
+ return;
+ }
#endif
musb_g_giveback(musb_ep, request, 0);
-
- request = next_request(musb_ep);
- if (!request)
- goto done;
-
- /* don't start more i/o till the stall clears */
+ /*
+ * In the giveback function the MUSB lock is
+		 * released and re-acquired after some time. During
+ * this time period the INDEX register could get
+ * changed by the gadget_queue function especially
+ * on SMP systems. Reselect the INDEX to be sure
+ * we are reading/modifying the right registers
+ */
musb_ep_select(mbase, epnum);
- csr = musb_readw(epio, MUSB_RXCSR);
- if (csr & MUSB_RXCSR_P_SENDSTALL)
- goto done;
- }
-
-
- /* analyze request if the ep is hot */
- if (request)
- rxstate(musb, to_musb_request(request));
- else
- DBG(3, "packet waiting for %s%s request\n",
- musb_ep->desc ? "" : "inactive ",
- musb_ep->end_point.name);
-done:
- return;
+ req = next_request(musb_ep);
+ if (!req)
+ return;
+ }
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
+ defined(CONFIG_USB_UX500_DMA)
+exit:
+#endif
+ /* Analyze request */
+ rxstate(musb, req);
}
/* ------------------------------------------------------------ */
@@ -881,32 +986,61 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
/* REVISIT this rules out high bandwidth periodic transfers */
- tmp = le16_to_cpu(desc->wMaxPacketSize);
- if (tmp & ~0x07ff)
- goto fail;
- musb_ep->packet_sz = tmp;
+ tmp = usb_endpoint_maxp(desc);
+ if (tmp & ~0x07ff) {
+ int ok;
+
+ if (usb_endpoint_dir_in(desc))
+ ok = musb->hb_iso_tx;
+ else
+ ok = musb->hb_iso_rx;
+
+ if (!ok) {
+ dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
+ goto fail;
+ }
+ musb_ep->hb_mult = (tmp >> 11) & 3;
+ } else {
+ musb_ep->hb_mult = 0;
+ }
+
+ musb_ep->packet_sz = tmp & 0x7ff;
+ tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
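/*
 * Illustrative sketch, not part of the patch: how the wMaxPacketSize value
 * returned by usb_endpoint_maxp() is decoded above (hypothetical helper).
 * Bits 10:0 hold the base maxpacket and bits 12:11 the number of additional
 * transactions per microframe for high-bandwidth iso/interrupt endpoints.
 * For example, 0x1400 decodes to packet_sz 1024, hb_mult 2, 3072 bytes per
 * microframe.
 */
static unsigned decode_maxp(u16 maxp, u16 *packet_sz, u8 *hb_mult)
{
	*packet_sz = maxp & 0x7ff;
	*hb_mult = (maxp >> 11) & 3;
	return *packet_sz * (*hb_mult + 1);	/* total payload per (micro)frame */
}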
/* enable the interrupts for the endpoint, set the endpoint
* packet size (or fail), set the mode, clear the fifo
*/
musb_ep_select(mbase, epnum);
if (usb_endpoint_dir_in(desc)) {
- u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
if (hw_ep->is_shared_fifo)
musb_ep->is_in = 1;
if (!musb_ep->is_in)
goto fail;
- if (tmp > hw_ep->max_packet_sz_tx)
+
+ if (tmp > hw_ep->max_packet_sz_tx) {
+ dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
goto fail;
+ }
- int_txe |= (1 << epnum);
- musb_writew(mbase, MUSB_INTRTXE, int_txe);
+ musb->intrtxe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
/* REVISIT if can_bulk_split(), use by updating "tmp";
* likewise high bandwidth periodic tx
*/
- musb_writew(regs, MUSB_TXMAXP, tmp);
+ /* Set TXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode.
+ */
+ if (musb->double_buffer_not_ok) {
+ musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+ } else {
+ if (can_bulk_split(musb, musb_ep->type))
+ musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
+ musb_ep->packet_sz) - 1;
+ musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+ | (musb_ep->hb_mult << 11));
+ }
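/*
 * Illustrative sketch, not part of the patch: the TXMAXP value composed in
 * the else-branch above (hypothetical helper).  The low bits carry the base
 * packet size and bits 12:11 the extra-packet multiplier; when
 * can_bulk_split() applies, hb_mult is derived from how many maxpacket-sized
 * chunks fit in the hardware TX FIFO.
 */
static u16 make_txmaxp(u16 packet_sz, u16 hb_mult)
{
	return packet_sz | (hb_mult << 11);
}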
csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
if (musb_readw(regs, MUSB_TXCSR)
@@ -921,22 +1055,31 @@ static int musb_gadget_enable(struct usb_ep *ep,
musb_writew(regs, MUSB_TXCSR, csr);
} else {
- u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
if (hw_ep->is_shared_fifo)
musb_ep->is_in = 0;
if (musb_ep->is_in)
goto fail;
- if (tmp > hw_ep->max_packet_sz_rx)
+
+ if (tmp > hw_ep->max_packet_sz_rx) {
+ dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
goto fail;
+ }
- int_rxe |= (1 << epnum);
- musb_writew(mbase, MUSB_INTRRXE, int_rxe);
+ musb->intrrxe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);
/* REVISIT if can_bulk_combine() use by updating "tmp"
* likewise high bandwidth periodic rx
*/
- musb_writew(regs, MUSB_RXMAXP, tmp);
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode.
+ */
+ if (musb->double_buffer_not_ok)
+ musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
+ else
+ musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+ | (musb_ep->hb_mult << 11));
/* force shared fifo to OUT-only mode */
if (hw_ep->is_shared_fifo) {
@@ -969,6 +1112,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
musb_ep->desc = desc;
musb_ep->busy = 0;
+ musb_ep->wedged = 0;
status = 0;
pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
@@ -977,7 +1121,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
case USB_ENDPOINT_XFER_INT: s = "int"; break;
default: s = "iso"; break;
- }; s; }),
+ } s; }),
musb_ep->is_in ? "IN" : "OUT",
musb_ep->dma ? "dma, " : "",
musb_ep->packet_sz);
@@ -1011,18 +1155,17 @@ static int musb_gadget_disable(struct usb_ep *ep)
/* zero the endpoint sizes */
if (musb_ep->is_in) {
- u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
- int_txe &= ~(1 << epnum);
- musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
+ musb->intrtxe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
musb_writew(epio, MUSB_TXMAXP, 0);
} else {
- u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
- int_rxe &= ~(1 << epnum);
- musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
+ musb->intrrxe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
musb_writew(epio, MUSB_RXMAXP, 0);
}
musb_ep->desc = NULL;
+ musb_ep->end_point.desc = NULL;
/* abort all pending DMA and requests */
nuke(musb_ep, -ESHUTDOWN);
@@ -1031,7 +1174,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
spin_unlock_irqrestore(&(musb->lock), flags);
- DBG(2, "%s\n", musb_ep->end_point.name);
+ dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
return status;
}
@@ -1043,16 +1186,19 @@ static int musb_gadget_disable(struct usb_ep *ep)
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb *musb = musb_ep->musb;
struct musb_request *request = NULL;
request = kzalloc(sizeof *request, gfp_flags);
- if (request) {
- INIT_LIST_HEAD(&request->request.list);
- request->request.dma = DMA_ADDR_INVALID;
- request->epnum = musb_ep->current_epnum;
- request->ep = musb_ep;
+ if (!request) {
+ dev_dbg(musb->controller, "not enough memory\n");
+ return NULL;
}
+ request->request.dma = DMA_ADDR_INVALID;
+ request->epnum = musb_ep->current_epnum;
+ request->ep = musb_ep;
+
return &request->request;
}
@@ -1077,9 +1223,9 @@ struct free_record {
/*
* Context: controller locked, IRQs blocked.
*/
-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
- DBG(3, "<== %s request %p len %u on hw_ep%d\n",
+ dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
req->tx ? "TX/IN" : "RX/OUT",
&req->request, req->request.length, req->epnum);
@@ -1113,7 +1259,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
if (request->ep != musb_ep)
return -EINVAL;
- DBG(4, "<== to %s request=%p\n", ep->name, req);
+ dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
/* request is mine now... */
request->request.actual = 0;
@@ -1121,48 +1267,27 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
request->epnum = musb_ep->current_epnum;
request->tx = musb_ep->is_in;
- if (is_dma_capable() && musb_ep->dma) {
- if (request->request.dma == DMA_ADDR_INVALID) {
- request->request.dma = dma_map_single(
- musb->controller,
- request->request.buf,
- request->request.length,
- request->tx
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- request->mapped = 1;
- } else {
- dma_sync_single_for_device(musb->controller,
- request->request.dma,
- request->request.length,
- request->tx
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- request->mapped = 0;
- }
- } else if (!req->buf) {
- return -ENODATA;
- } else
- request->mapped = 0;
+ map_dma_buffer(request, musb, musb_ep);
spin_lock_irqsave(&musb->lock, lockflags);
/* don't queue if the ep is down */
if (!musb_ep->desc) {
- DBG(4, "req %p queued to %s while ep %s\n",
+ dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
req, ep->name, "disabled");
status = -ESHUTDOWN;
- goto cleanup;
+ unmap_dma_buffer(request, musb);
+ goto unlock;
}
/* add request to the list */
- list_add_tail(&(request->request.list), &(musb_ep->req_list));
+ list_add_tail(&request->list, &musb_ep->req_list);
/* it this is the head of the queue, start i/o ... */
- if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
+ if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
musb_ep_restart(musb, request);
-cleanup:
+unlock:
spin_unlock_irqrestore(&musb->lock, lockflags);
return status;
}
@@ -1170,7 +1295,8 @@ cleanup:
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
- struct usb_request *r;
+ struct musb_request *req = to_musb_request(request);
+ struct musb_request *r;
unsigned long flags;
int status = 0;
struct musb *musb = musb_ep->musb;
@@ -1181,17 +1307,17 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
spin_lock_irqsave(&musb->lock, flags);
list_for_each_entry(r, &musb_ep->req_list, list) {
- if (r == request)
+ if (r == req)
break;
}
- if (r != request) {
- DBG(3, "request %p not queued to %s\n", request, ep->name);
+ if (r != req) {
+ dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
status = -EINVAL;
goto done;
}
/* if the hardware doesn't have the request, easy ... */
- if (musb_ep->req_list.next != &request->list || musb_ep->busy)
+ if (musb_ep->req_list.next != &req->list || musb_ep->busy)
musb_g_giveback(musb_ep, request, -ECONNRESET);
/* ... else abort the dma transfer ... */
@@ -1223,7 +1349,7 @@ done:
*
* exported to ep0 code
*/
-int musb_gadget_set_halt(struct usb_ep *ep, int value)
+static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
u8 epnum = musb_ep->current_epnum;
@@ -1232,7 +1358,7 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value)
void __iomem *mbase;
unsigned long flags;
u16 csr;
- struct musb_request *request = NULL;
+ struct musb_request *request;
int status = 0;
if (!ep)
@@ -1248,24 +1374,30 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value)
musb_ep_select(mbase, epnum);
- /* cannot portably stall with non-empty FIFO */
- request = to_musb_request(next_request(musb_ep));
- if (value && musb_ep->is_in) {
- csr = musb_readw(epio, MUSB_TXCSR);
- if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
- DBG(3, "%s fifo busy, cannot halt\n", ep->name);
- spin_unlock_irqrestore(&musb->lock, flags);
- return -EAGAIN;
+ request = next_request(musb_ep);
+ if (value) {
+ if (request) {
+ dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
+ ep->name);
+ status = -EAGAIN;
+ goto done;
}
-
- }
+ /* Cannot portably stall with non-empty FIFO */
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
+ status = -EAGAIN;
+ goto done;
+ }
+ }
+ } else
+ musb_ep->wedged = 0;
/* set/clear the stall and toggle bits */
- DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+ dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
if (musb_ep->is_in) {
csr = musb_readw(epio, MUSB_TXCSR);
- if (csr & MUSB_TXCSR_FIFONOTEMPTY)
- csr |= MUSB_TXCSR_FLUSHFIFO;
csr |= MUSB_TXCSR_P_WZC_BITS
| MUSB_TXCSR_CLRDATATOG;
if (value)
@@ -1288,18 +1420,32 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value)
musb_writew(epio, MUSB_RXCSR, csr);
}
-done:
-
/* maybe start the first request in the queue */
if (!musb_ep->busy && !value && request) {
- DBG(3, "restarting the request\n");
+ dev_dbg(musb->controller, "restarting the request\n");
musb_ep_restart(musb, request);
}
+done:
spin_unlock_irqrestore(&musb->lock, flags);
return status;
}
+/*
+ * Sets the halt feature with the clear requests ignored
+ */
+static int musb_gadget_set_wedge(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+
+ if (!ep)
+ return -EINVAL;
+
+ musb_ep->wedged = 1;
+
+ return usb_ep_set_halt(ep);
+}
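/*
 * Illustrative usage sketch, not part of the patch: a hypothetical gadget
 * function driver wedging one of its endpoints.  The gadget core routes
 * usb_ep_set_wedge() to the .set_wedge op above, so a later
 * ClearFeature(ENDPOINT_HALT) from the host is ignored until the function
 * driver itself re-enables the endpoint.
 */
static int example_wedge_ep(struct usb_ep *ep)
{
	return usb_ep_set_wedge(ep);	/* halts the endpoint; host cannot clear it */
}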
+
static int musb_gadget_fifo_status(struct usb_ep *ep)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
@@ -1331,7 +1477,7 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
void __iomem *epio = musb->endpoints[epnum].regs;
void __iomem *mbase;
unsigned long flags;
- u16 csr, int_txe;
+ u16 csr;
mbase = musb->mregs;
@@ -1339,13 +1485,18 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
musb_ep_select(mbase, (u8) epnum);
/* disable interrupts */
- int_txe = musb_readw(mbase, MUSB_INTRTXE);
- musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+ musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
if (musb_ep->is_in) {
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+ /*
+			 * Setting both TXPKTRDY and FLUSHFIFO makes the controller
+			 * interrupt the current FIFO load instead of flushing the
+			 * packets already loaded, so clear TXPKTRDY here.
+ */
+ csr &= ~MUSB_TXCSR_TXPKTRDY;
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
musb_writew(epio, MUSB_TXCSR, csr);
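/*
 * Illustrative sketch, not part of the patch: the CSR value used above to
 * flush a non-empty TX FIFO (hypothetical helper).  FLUSHFIFO plus the
 * write-zero-to-clear mask is set, while TXPKTRDY is cleared because keeping
 * it set alongside FLUSHFIFO would only interrupt the packet currently being
 * loaded instead of flushing what is already in the FIFO.
 */
static u16 tx_flush_csr(u16 csr)
{
	csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
	csr &= ~MUSB_TXCSR_TXPKTRDY;
	return csr;
}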
@@ -1358,7 +1509,7 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
}
/* re-enable interrupt */
- musb_writew(mbase, MUSB_INTRTXE, int_txe);
+ musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
spin_unlock_irqrestore(&musb->lock, flags);
}
@@ -1370,6 +1521,7 @@ static const struct usb_ep_ops musb_ep_ops = {
.queue = musb_gadget_queue,
.dequeue = musb_gadget_dequeue,
.set_halt = musb_gadget_set_halt,
+ .set_wedge = musb_gadget_set_wedge,
.fifo_status = musb_gadget_fifo_status,
.fifo_flush = musb_gadget_fifo_flush
};
@@ -1394,7 +1546,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_PERIPHERAL:
/* NOTE: OTG state machine doesn't include B_SUSPENDED;
* that's part of the standard usb 1.1 state machine, and
@@ -1406,7 +1558,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
case OTG_STATE_B_IDLE:
/* Start SRP ... OTG not required. */
devctl = musb_readb(mregs, MUSB_DEVCTL);
- DBG(2, "Sending SRP: devctl: %02x\n", devctl);
+ dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(mregs, MUSB_DEVCTL, devctl);
devctl = musb_readb(mregs, MUSB_DEVCTL);
@@ -1423,6 +1575,10 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
break;
}
+ spin_unlock_irqrestore(&musb->lock, flags);
+ otg_start_srp(musb->xceiv->otg);
+ spin_lock_irqsave(&musb->lock, flags);
+
/* Block idling for at least 1s */
musb_platform_try_idle(musb,
jiffies + msecs_to_jiffies(1 * HZ));
@@ -1430,7 +1586,8 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
status = 0;
goto done;
default:
- DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
+ dev_dbg(musb->controller, "Unhandled wake: %s\n",
+ usb_otg_state_string(musb->xceiv->state));
goto done;
}
@@ -1439,7 +1596,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
power = musb_readb(mregs, MUSB_POWER);
power |= MUSB_POWER_RESUME;
musb_writeb(mregs, MUSB_POWER, power);
- DBG(2, "issue wakeup\n");
+ dev_dbg(musb->controller, "issue wakeup\n");
/* FIXME do this next chunk in a timer callback, no udelay */
mdelay(2);
@@ -1473,15 +1630,15 @@ static void musb_pullup(struct musb *musb, int is_on)
/* FIXME if on, HdrcStart; if off, HdrcStop */
- DBG(3, "gadget %s D+ pullup %s\n",
- musb->gadget_driver->function, is_on ? "on" : "off");
+ dev_dbg(musb->controller, "gadget D+ pullup %s\n",
+ is_on ? "on" : "off");
musb_writeb(musb->mregs, MUSB_POWER, power);
}
#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
- DBG(2, "<= %s =>\n", __func__);
+ dev_dbg(musb->controller, "<= %s =>\n", __func__);
/*
* FIXME iff driver's softconnect flag is set (as it is during probe,
@@ -1496,9 +1653,9 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
struct musb *musb = gadget_to_musb(gadget);
- if (!musb->xceiv.set_power)
+ if (!musb->xceiv->set_power)
return -EOPNOTSUPP;
- return otg_set_power(&musb->xceiv, mA);
+ return usb_phy_set_power(musb->xceiv, mA);
}
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
@@ -1508,6 +1665,8 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
is_on = !!is_on;
+ pm_runtime_get_sync(musb->controller);
+
/* NOTE: this assumes we are sensing vbus; we'd rather
* not pullup unless the B-session is active.
*/
@@ -1517,9 +1676,17 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
musb_pullup(musb, is_on);
}
spin_unlock_irqrestore(&musb->lock, flags);
+
+ pm_runtime_put(musb->controller);
+
return 0;
}
+static int musb_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int musb_gadget_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops musb_gadget_operations = {
.get_frame = musb_gadget_get_frame,
.wakeup = musb_gadget_wakeup,
@@ -1527,6 +1694,8 @@ static const struct usb_gadget_ops musb_gadget_operations = {
/* .vbus_session = musb_gadget_vbus_session, */
.vbus_draw = musb_gadget_vbus_draw,
.pullup = musb_gadget_pullup,
+ .udc_start = musb_gadget_start,
+ .udc_stop = musb_gadget_stop,
};
/* ----------------------------------------------------------------------- */
@@ -1537,16 +1706,8 @@ static const struct usb_gadget_ops musb_gadget_operations = {
* about there being only one external upstream port. It assumes
* all peripheral ports are external...
*/
-static struct musb *the_gadget;
-static void musb_gadget_release(struct device *dev)
-{
- /* kref_put(WHAT) */
- dev_dbg(dev, "%s\n", __func__);
-}
-
-
-static void __init
+static void
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
@@ -1566,14 +1727,14 @@ init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
ep->end_point.name = ep->name;
INIT_LIST_HEAD(&ep->end_point.ep_list);
if (!epnum) {
- ep->end_point.maxpacket = 64;
+ usb_ep_set_maxpacket_limit(&ep->end_point, 64);
ep->end_point.ops = &musb_g_ep0_ops;
musb->g.ep0 = &ep->end_point;
} else {
if (is_in)
- ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
+ usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
else
- ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
+ usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
ep->end_point.ops = &musb_ep_ops;
list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
}
@@ -1583,13 +1744,13 @@ init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
* Initialize the endpoints exposed to peripheral drivers, with backlinks
* to the rest of the driver state.
*/
-static inline void __init musb_g_init_endpoints(struct musb *musb)
+static inline void musb_g_init_endpoints(struct musb *musb)
{
u8 epnum;
struct musb_hw_ep *hw_ep;
unsigned count = 0;
- /* intialize endpoint list just once */
+ /* initialize endpoint list just once */
INIT_LIST_HEAD(&(musb->g.ep_list));
for (epnum = 0, hw_ep = musb->endpoints;
@@ -1616,7 +1777,7 @@ static inline void __init musb_g_init_endpoints(struct musb *musb)
/* called once during driver setup to initialize and link into
* the driver model; memory is zeroed.
*/
-int __init musb_gadget_setup(struct musb *musb)
+int musb_gadget_setup(struct musb *musb)
{
int status;
@@ -1624,42 +1785,44 @@ int __init musb_gadget_setup(struct musb *musb)
* musb peripherals at the same time, only the bus lock
* is probably held.
*/
- if (the_gadget)
- return -EBUSY;
- the_gadget = musb;
musb->g.ops = &musb_gadget_operations;
- musb->g.is_dualspeed = 1;
+ musb->g.max_speed = USB_SPEED_HIGH;
musb->g.speed = USB_SPEED_UNKNOWN;
+ MUSB_DEV_MODE(musb);
+ musb->xceiv->otg->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+
/* this "gadget" abstracts/virtualizes the controller */
- dev_set_name(&musb->g.dev, "gadget");
- musb->g.dev.parent = musb->controller;
- musb->g.dev.dma_mask = musb->controller->dma_mask;
- musb->g.dev.release = musb_gadget_release;
musb->g.name = musb_driver_name;
-
- if (is_otg_enabled(musb))
- musb->g.is_otg = 1;
+#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
+ musb->g.is_otg = 1;
+#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
+ musb->g.is_otg = 0;
+#endif
musb_g_init_endpoints(musb);
musb->is_active = 0;
musb_platform_try_idle(musb, 0);
- status = device_register(&musb->g.dev);
- if (status != 0)
- the_gadget = NULL;
+ status = usb_add_gadget_udc(musb->controller, &musb->g);
+ if (status)
+ goto err;
+
+ return 0;
+err:
+ musb->g.dev.parent = NULL;
+ device_unregister(&musb->g.dev);
return status;
}
void musb_gadget_cleanup(struct musb *musb)
{
- if (musb != the_gadget)
+ if (musb->port_mode == MUSB_PORT_MODE_HOST)
return;
-
- device_unregister(&musb->g.dev);
- the_gadget = NULL;
+ usb_del_gadget_udc(&musb->g);
}
/*
@@ -1668,100 +1831,55 @@ void musb_gadget_cleanup(struct musb *musb)
*
* -EINVAL something went wrong (not driver)
* -EBUSY another gadget is already using the controller
- * -ENOMEM no memeory to perform the operation
+ * -ENOMEM no memory to perform the operation
*
* @param driver the gadget driver
* @return <0 if error, 0 if everything is fine
*/
-int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+static int musb_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- int retval;
- unsigned long flags;
- struct musb *musb = the_gadget;
-
- if (!driver
- || driver->speed != USB_SPEED_HIGH
- || !driver->bind
- || !driver->setup)
- return -EINVAL;
-
- /* driver must be initialized to support peripheral mode */
- if (!musb || !(musb->board_mode == MUSB_OTG
- || musb->board_mode != MUSB_OTG)) {
- DBG(1, "%s, no dev??\n", __func__);
- return -ENODEV;
- }
-
- DBG(3, "registering driver %s\n", driver->function);
- spin_lock_irqsave(&musb->lock, flags);
+ struct musb *musb = gadget_to_musb(g);
+ struct usb_otg *otg = musb->xceiv->otg;
+ unsigned long flags;
+ int retval = 0;
- if (musb->gadget_driver) {
- DBG(1, "%s is already bound to %s\n",
- musb_driver_name,
- musb->gadget_driver->driver.name);
- retval = -EBUSY;
- } else {
- musb->gadget_driver = driver;
- musb->g.dev.driver = &driver->driver;
- driver->driver.bus = NULL;
- musb->softconnect = 1;
- retval = 0;
+ if (driver->max_speed < USB_SPEED_HIGH) {
+ retval = -EINVAL;
+ goto err;
}
- spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_get_sync(musb->controller);
- if (retval == 0) {
- retval = driver->bind(&musb->g);
- if (retval != 0) {
- DBG(3, "bind to driver %s failed --> %d\n",
- driver->driver.name, retval);
- musb->gadget_driver = NULL;
- musb->g.dev.driver = NULL;
- }
+ dev_dbg(musb->controller, "registering driver %s\n", driver->function);
- spin_lock_irqsave(&musb->lock, flags);
+ musb->softconnect = 0;
+ musb->gadget_driver = driver;
- /* REVISIT always use otg_set_peripheral(), handling
- * issues including the root hub one below ...
- */
- musb->xceiv.gadget = &musb->g;
- musb->xceiv.state = OTG_STATE_B_IDLE;
- musb->is_active = 1;
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->is_active = 1;
- /* FIXME this ignores the softconnect flag. Drivers are
- * allowed hold the peripheral inactive until for example
- * userspace hooks up printer hardware or DSP codecs, so
- * hosts only see fully functional devices.
- */
+ otg_set_peripheral(otg, &musb->g);
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ spin_unlock_irqrestore(&musb->lock, flags);
- if (!is_otg_enabled(musb))
- musb_start(musb);
+ musb_start(musb);
- spin_unlock_irqrestore(&musb->lock, flags);
+ /* REVISIT: funcall to other code, which also
+ * handles power budgeting ... this way also
+ * ensures HdrcStart is indirectly called.
+ */
+ if (musb->xceiv->last_event == USB_EVENT_ID)
+ musb_platform_set_vbus(musb, 1);
- if (is_otg_enabled(musb)) {
- DBG(3, "OTG startup...\n");
+ if (musb->xceiv->last_event == USB_EVENT_NONE)
+ pm_runtime_put(musb->controller);
- /* REVISIT: funcall to other code, which also
- * handles power budgeting ... this way also
- * ensures HdrcStart is indirectly called.
- */
- retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
- if (retval < 0) {
- DBG(1, "add_hcd failed, %d\n", retval);
- spin_lock_irqsave(&musb->lock, flags);
- musb->xceiv.gadget = NULL;
- musb->xceiv.state = OTG_STATE_UNDEFINED;
- musb->gadget_driver = NULL;
- musb->g.dev.driver = NULL;
- spin_unlock_irqrestore(&musb->lock, flags);
- }
- }
- }
+ return 0;
+err:
return retval;
}
-EXPORT_SYMBOL(usb_gadget_register_driver);
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
@@ -1798,10 +1916,6 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
nuke(&hw_ep->ep_out, -ESHUTDOWN);
}
}
-
- spin_unlock(&musb->lock);
- driver->disconnect(&musb->g);
- spin_lock(&musb->lock);
}
}
@@ -1811,58 +1925,48 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
*
* @param driver the gadget driver to unregister
*/
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int musb_gadget_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
+ struct musb *musb = gadget_to_musb(g);
unsigned long flags;
- int retval = 0;
- struct musb *musb = the_gadget;
- if (!driver || !driver->unbind || !musb)
- return -EINVAL;
+ if (musb->xceiv->last_event == USB_EVENT_NONE)
+ pm_runtime_get_sync(musb->controller);
- /* REVISIT always use otg_set_peripheral() here too;
+ /*
+ * REVISIT always use otg_set_peripheral() here too;
* this needs to shut down the OTG engine.
*/
spin_lock_irqsave(&musb->lock, flags);
-#ifdef CONFIG_USB_MUSB_OTG
musb_hnp_stop(musb);
-#endif
-
- if (musb->gadget_driver == driver) {
-
- (void) musb_gadget_vbus_draw(&musb->g, 0);
- musb->xceiv.state = OTG_STATE_UNDEFINED;
- stop_activity(musb, driver);
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
- DBG(3, "unregistering driver %s\n", driver->function);
- spin_unlock_irqrestore(&musb->lock, flags);
- driver->unbind(&musb->g);
- spin_lock_irqsave(&musb->lock, flags);
+ musb->xceiv->state = OTG_STATE_UNDEFINED;
+ stop_activity(musb, driver);
+ otg_set_peripheral(musb->xceiv->otg, NULL);
- musb->gadget_driver = NULL;
- musb->g.dev.driver = NULL;
+ dev_dbg(musb->controller, "unregistering driver %s\n",
+ driver ? driver->function : "(removed)");
- musb->is_active = 0;
- musb_platform_try_idle(musb, 0);
- } else
- retval = -EINVAL;
+ musb->is_active = 0;
+ musb->gadget_driver = NULL;
+ musb_platform_try_idle(musb, 0);
spin_unlock_irqrestore(&musb->lock, flags);
- if (is_otg_enabled(musb) && retval == 0) {
- usb_remove_hcd(musb_to_hcd(musb));
- /* FIXME we need to be able to register another
- * gadget driver here and have everything work;
- * that currently misbehaves.
- */
- }
+ /*
+ * FIXME we need to be able to register another
+ * gadget driver here and have everything work;
+ * that currently misbehaves.
+ */
- return retval;
-}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
+ pm_runtime_put(musb->controller);
+ return 0;
+}
/* ----------------------------------------------------------------------- */
@@ -1871,7 +1975,7 @@ EXPORT_SYMBOL(usb_gadget_unregister_driver);
void musb_g_resume(struct musb *musb)
{
musb->is_suspended = 0;
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_IDLE:
break;
case OTG_STATE_B_WAIT_ACON:
@@ -1885,7 +1989,7 @@ void musb_g_resume(struct musb *musb)
break;
default:
WARNING("unhandled RESUME transition (%s)\n",
- otg_state_string(musb));
+ usb_otg_state_string(musb->xceiv->state));
}
}
@@ -1895,12 +1999,12 @@ void musb_g_suspend(struct musb *musb)
u8 devctl;
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- DBG(3, "devctl %02x\n", devctl);
+ dev_dbg(musb->controller, "devctl %02x\n", devctl);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_B_IDLE:
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
break;
case OTG_STATE_B_PERIPHERAL:
musb->is_suspended = 1;
@@ -1915,7 +2019,7 @@ void musb_g_suspend(struct musb *musb)
* A_PERIPHERAL may need care too
*/
WARNING("unhandled SUSPEND transition (%s)\n",
- otg_state_string(musb));
+ usb_otg_state_string(musb->xceiv->state));
}
}
@@ -1931,7 +2035,7 @@ void musb_g_disconnect(struct musb *musb)
void __iomem *mregs = musb->mregs;
u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
- DBG(3, "devctl %02x\n", devctl);
+ dev_dbg(musb->controller, "devctl %02x\n", devctl);
/* clear HR */
musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
@@ -1946,22 +2050,22 @@ void musb_g_disconnect(struct musb *musb)
spin_lock(&musb->lock);
}
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
default:
-#ifdef CONFIG_USB_MUSB_OTG
- DBG(2, "Unhandled disconnect %s, setting a_idle\n",
- otg_state_string(musb));
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
+ usb_otg_state_string(musb->xceiv->state));
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
break;
case OTG_STATE_A_PERIPHERAL:
- musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ MUSB_HST_MODE(musb);
break;
case OTG_STATE_B_WAIT_ACON:
case OTG_STATE_B_HOST:
-#endif
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
break;
case OTG_STATE_B_SRP_INIT:
break;
@@ -1978,10 +2082,9 @@ __acquires(musb->lock)
u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
u8 power;
- DBG(3, "<== %s addr=%x driver '%s'\n",
+ dev_dbg(musb->controller, "<== %s driver '%s'\n",
(devctl & MUSB_DEVCTL_BDEVICE)
? "B-Device" : "A-Device",
- musb_readb(mbase, MUSB_FADDR),
musb->gadget_driver
? musb->gadget_driver->driver.name
: NULL
@@ -2016,16 +2119,22 @@ __acquires(musb->lock)
/* Normal reset, as B-Device;
* or else after HNP, as A-Device
*/
- if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ if (!musb->g.is_otg) {
+ /* USB device controllers that are not OTG compatible
+		 * may not have a DEVCTL register in silicon.
+ * In that case, do not rely on devctl for setting
+ * peripheral mode.
+ */
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb->g.is_a_peripheral = 0;
+ } else if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb->g.is_a_peripheral = 0;
- } else if (is_otg_enabled(musb)) {
- musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
+ } else {
+ musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
musb->g.is_a_peripheral = 1;
- } else
- WARN_ON(1);
+ }
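/*
 * Illustrative sketch, not part of the patch: the peripheral-mode decision on
 * reset, isolated from the code above (hypothetical helper).  Controllers
 * that are not OTG capable may not implement DEVCTL, so the B-device bit is
 * only consulted when the gadget is OTG.
 */
static int reset_as_a_peripheral(int is_otg, u8 devctl)
{
	if (!is_otg)
		return 0;			/* plain device controller: B role */
	return !(devctl & MUSB_DEVCTL_BDEVICE);	/* A-device as peripheral after HNP */
}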
/* start with default limits on VBUS power draw */
- (void) musb_gadget_vbus_draw(&musb->g,
- is_otg_enabled(musb) ? 8 : 100);
+ (void) musb_gadget_vbus_draw(&musb->g, 8);
}
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index 59502da9f73..0314dfc770c 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,54 @@
#ifndef __MUSB_GADGET_H
#define __MUSB_GADGET_H
+#include <linux/list.h>
+
+#if IS_ENABLED(CONFIG_USB_MUSB_GADGET) || IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
+extern irqreturn_t musb_g_ep0_irq(struct musb *);
+extern void musb_g_tx(struct musb *, u8);
+extern void musb_g_rx(struct musb *, u8);
+extern void musb_g_reset(struct musb *);
+extern void musb_g_suspend(struct musb *);
+extern void musb_g_resume(struct musb *);
+extern void musb_g_wakeup(struct musb *);
+extern void musb_g_disconnect(struct musb *);
+extern void musb_gadget_cleanup(struct musb *);
+extern int musb_gadget_setup(struct musb *);
+
+#else
+static inline irqreturn_t musb_g_ep0_irq(struct musb *musb)
+{
+ return 0;
+}
+
+static inline void musb_g_tx(struct musb *musb, u8 epnum) {}
+static inline void musb_g_rx(struct musb *musb, u8 epnum) {}
+static inline void musb_g_reset(struct musb *musb) {}
+static inline void musb_g_suspend(struct musb *musb) {}
+static inline void musb_g_resume(struct musb *musb) {}
+static inline void musb_g_wakeup(struct musb *musb) {}
+static inline void musb_g_disconnect(struct musb *musb) {}
+static inline void musb_gadget_cleanup(struct musb *musb) {}
+static inline int musb_gadget_setup(struct musb *musb)
+{
+ return 0;
+}
+#endif
+
+enum buffer_map_state {
+ UN_MAPPED = 0,
+ PRE_MAPPED,
+ MUSB_MAPPED
+};
+
struct musb_request {
struct usb_request request;
+ struct list_head list;
struct musb_ep *ep;
struct musb *musb;
u8 tx; /* endpoint direction */
u8 epnum;
- u8 mapped;
+ enum buffer_map_state map_state;
};
static inline struct musb_request *to_musb_request(struct usb_request *req)
@@ -75,8 +116,12 @@ struct musb_ep {
/* later things are modified based on usage */
struct list_head req_list;
+ u8 wedged;
+
/* true if lock must be dropped but req_list may not be advanced */
u8 busy;
+
+ u8 hb_mult;
};
static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
@@ -84,25 +129,19 @@ static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
}
-static inline struct usb_request *next_request(struct musb_ep *ep)
+static inline struct musb_request *next_request(struct musb_ep *ep)
{
struct list_head *queue = &ep->req_list;
if (list_empty(queue))
return NULL;
- return container_of(queue->next, struct usb_request, list);
+ return container_of(queue->next, struct musb_request, list);
}
-extern void musb_g_tx(struct musb *musb, u8 epnum);
-extern void musb_g_rx(struct musb *musb, u8 epnum);
-
extern const struct usb_ep_ops musb_g_ep0_ops;
-extern int musb_gadget_setup(struct musb *);
-extern void musb_gadget_cleanup(struct musb *);
-
extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
-extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
+extern void musb_ep_restart(struct musb *, struct musb_request *);
#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 3f5e30ddfa2..2af45a0c893 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -4,6 +4,7 @@
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -36,7 +37,6 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
@@ -58,7 +58,8 @@
static char *decode_ep0stage(u8 stage)
{
switch (stage) {
- case MUSB_EP0_STAGE_SETUP: return "idle";
+ case MUSB_EP0_STAGE_IDLE: return "idle";
+ case MUSB_EP0_STAGE_SETUP: return "setup";
case MUSB_EP0_STAGE_TX: return "in";
case MUSB_EP0_STAGE_RX: return "out";
case MUSB_EP0_STAGE_ACKWAIT: return "wait";
@@ -86,7 +87,6 @@ static int service_tx_status_request(
case USB_RECIP_DEVICE:
result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
-#ifdef CONFIG_USB_MUSB_OTG
if (musb->g.is_otg) {
result[0] |= musb->g.b_hnp_enable
<< USB_DEVICE_B_HNP_ENABLE;
@@ -95,7 +95,6 @@ static int service_tx_status_request(
result[0] |= musb->g.a_hnp_support
<< USB_DEVICE_A_HNP_SUPPORT;
}
-#endif
break;
case USB_RECIP_INTERFACE:
@@ -197,7 +196,6 @@ service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
{
musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
}
/*
@@ -208,7 +206,7 @@ static inline void musb_try_b_hnp_enable(struct musb *musb)
void __iomem *mbase = musb->mregs;
u8 devctl;
- DBG(1, "HNP: Setting HR\n");
+ dev_dbg(musb->controller, "HNP: Setting HR\n");
devctl = musb_readb(mbase, MUSB_DEVCTL);
musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
}
@@ -256,30 +254,61 @@ __acquires(musb->lock)
case USB_RECIP_INTERFACE:
break;
case USB_RECIP_ENDPOINT:{
- const u8 num = ctrlrequest->wIndex & 0x0f;
- struct musb_ep *musb_ep;
+ const u8 epnum =
+ ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *ep;
+ struct musb_request *request;
+ void __iomem *regs;
+ int is_in;
+ u16 csr;
- if (num == 0
- || num >= MUSB_C_NUM_EPS
- || ctrlrequest->wValue
- != USB_ENDPOINT_HALT)
+ if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
+ ctrlrequest->wValue != USB_ENDPOINT_HALT)
break;
- if (ctrlrequest->wIndex & USB_DIR_IN)
- musb_ep = &musb->endpoints[num].ep_in;
+ ep = musb->endpoints + epnum;
+ regs = ep->regs;
+ is_in = ctrlrequest->wIndex & USB_DIR_IN;
+ if (is_in)
+ musb_ep = &ep->ep_in;
else
- musb_ep = &musb->endpoints[num].ep_out;
+ musb_ep = &ep->ep_out;
if (!musb_ep->desc)
break;
- /* REVISIT do it directly, no locking games */
- spin_unlock(&musb->lock);
- musb_gadget_set_halt(&musb_ep->end_point, 0);
- spin_lock(&musb->lock);
+ handled = 1;
+ /* Ignore request if endpoint is wedged */
+ if (musb_ep->wedged)
+ break;
+
+ musb_ep_select(mbase, epnum);
+ if (is_in) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_CLRDATATOG |
+ MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_P_SENDSTALL |
+ MUSB_TXCSR_P_SENTSTALL |
+ MUSB_TXCSR_TXPKTRDY);
+ musb_writew(regs, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(regs, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_CLRDATATOG |
+ MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_RXCSR_P_SENDSTALL |
+ MUSB_RXCSR_P_SENTSTALL);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ }
+
+ /* Maybe start the first request in the queue */
+ request = next_request(musb_ep);
+ if (!musb_ep->busy && request) {
+ dev_dbg(musb->controller, "restarting the request\n");
+ musb_ep_restart(musb, request);
+ }
/* select ep0 again */
musb_ep_select(mbase, 0);
- handled = 1;
} break;
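/*
 * Illustrative sketch, not part of the patch: the TX-side CSR transformation
 * applied above when the host clears ENDPOINT_HALT (hypothetical helper).
 * The data toggle is reset and the stall/sent-stall/packet-ready bits are
 * cleared; the RX side is handled analogously with the RXCSR equivalents.
 */
static u16 clear_tx_halt_csr(u16 csr)
{
	csr |= MUSB_TXCSR_CLRDATATOG | MUSB_TXCSR_P_WZC_BITS;
	csr &= ~(MUSB_TXCSR_P_SENDSTALL | MUSB_TXCSR_P_SENTSTALL |
		 MUSB_TXCSR_TXPKTRDY);
	return csr;
}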
default:
/* class, vendor, etc ... delegate */
@@ -327,6 +356,31 @@ __acquires(musb->lock)
musb->test_mode_nr =
MUSB_TEST_PACKET;
break;
+
+ case 0xc0:
+ /* TEST_FORCE_HS */
+ pr_debug("TEST_FORCE_HS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_HS;
+ break;
+ case 0xc1:
+ /* TEST_FORCE_FS */
+ pr_debug("TEST_FORCE_FS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_FS;
+ break;
+ case 0xc2:
+ /* TEST_FIFO_ACCESS */
+ pr_debug("TEST_FIFO_ACCESS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FIFO_ACCESS;
+ break;
+ case 0xc3:
+ /* TEST_FORCE_HOST */
+ pr_debug("TEST_FORCE_HOST\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_HOST;
+ break;
default:
goto stall;
}
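/*
 * Illustrative sketch, not part of the patch: the vendor test-mode decode
 * performed by the new cases above (hypothetical helper).  The selector
 * travels in the high byte of wIndex; 0x01-0x04 are the standard USB 2.0
 * test modes, while 0xc0-0xc3 select MUSB-specific testmode register values.
 */
static u8 vendor_test_mode(u8 selector)
{
	switch (selector) {
	case 0xc0: return MUSB_TEST_FORCE_HS;
	case 0xc1: return MUSB_TEST_FORCE_FS;
	case 0xc2: return MUSB_TEST_FIFO_ACCESS;
	case 0xc3: return MUSB_TEST_FORCE_HOST;
	default:   return 0;	/* not a MUSB vendor test mode */
	}
}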
@@ -335,7 +389,6 @@ __acquires(musb->lock)
if (handled > 0)
musb->test_mode = true;
break;
-#ifdef CONFIG_USB_MUSB_OTG
case USB_DEVICE_B_HNP_ENABLE:
if (!musb->g.is_otg)
goto stall;
@@ -352,7 +405,9 @@ __acquires(musb->lock)
goto stall;
musb->g.a_alt_hnp_support = 1;
break;
-#endif
+ case USB_DEVICE_DEBUG_MODE:
+ handled = 0;
+ break;
stall:
default:
handled = -EINVAL;
@@ -372,10 +427,8 @@ stall:
int is_in;
u16 csr;
- if (epnum == 0
- || epnum >= MUSB_C_NUM_EPS
- || ctrlrequest->wValue
- != USB_ENDPOINT_HALT)
+ if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
+ ctrlrequest->wValue != USB_ENDPOINT_HALT)
break;
ep = musb->endpoints + epnum;
@@ -390,24 +443,20 @@ stall:
musb_ep_select(mbase, epnum);
if (is_in) {
- csr = musb_readw(regs,
- MUSB_TXCSR);
+ csr = musb_readw(regs, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY)
csr |= MUSB_TXCSR_FLUSHFIFO;
csr |= MUSB_TXCSR_P_SENDSTALL
| MUSB_TXCSR_CLRDATATOG
| MUSB_TXCSR_P_WZC_BITS;
- musb_writew(regs, MUSB_TXCSR,
- csr);
+ musb_writew(regs, MUSB_TXCSR, csr);
} else {
- csr = musb_readw(regs,
- MUSB_RXCSR);
+ csr = musb_readw(regs, MUSB_RXCSR);
csr |= MUSB_RXCSR_P_SENDSTALL
| MUSB_RXCSR_FLUSHFIFO
| MUSB_RXCSR_CLRDATATOG
- | MUSB_TXCSR_P_WZC_BITS;
- musb_writew(regs, MUSB_RXCSR,
- csr);
+ | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR, csr);
}
/* select ep0 again */
@@ -436,10 +485,12 @@ stall:
static void ep0_rxstate(struct musb *musb)
{
void __iomem *regs = musb->control_ep->regs;
+ struct musb_request *request;
struct usb_request *req;
u16 count, csr;
- req = next_ep0_request(musb);
+ request = next_ep0_request(musb);
+ req = &request->request;
/* read packet and ack; or stall because of gadget driver bug:
* should have provided the rx buffer before setup() returned.
@@ -454,8 +505,10 @@ static void ep0_rxstate(struct musb *musb)
req->status = -EOVERFLOW;
count = len;
}
- musb_read_fifo(&musb->endpoints[0], count, buf);
- req->actual += count;
+ if (count > 0) {
+ musb_read_fifo(&musb->endpoints[0], count, buf);
+ req->actual += count;
+ }
csr = MUSB_CSR0_P_SVDRXPKTRDY;
if (count < 64 || req->actual == req->length) {
musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
@@ -489,17 +542,20 @@ static void ep0_rxstate(struct musb *musb)
static void ep0_txstate(struct musb *musb)
{
void __iomem *regs = musb->control_ep->regs;
- struct usb_request *request = next_ep0_request(musb);
+ struct musb_request *req = next_ep0_request(musb);
+ struct usb_request *request;
u16 csr = MUSB_CSR0_TXPKTRDY;
u8 *fifo_src;
u8 fifo_count;
- if (!request) {
+ if (!req) {
/* WARN_ON(1); */
- DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
+ dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
return;
}
+ request = &req->request;
+
/* load the data */
fifo_src = (u8 *) request->buf + request->actual;
fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
@@ -509,7 +565,8 @@ static void ep0_txstate(struct musb *musb)
/* update the flags */
if (fifo_count < MUSB_MAX_END0_PACKET
- || request->actual == request->length) {
+ || (request->actual == request->length
+ && !request->zero)) {
musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
csr |= MUSB_CSR0_P_DATAEND;
} else
@@ -542,7 +599,7 @@ static void ep0_txstate(struct musb *musb)
static void
musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
{
- struct usb_request *r;
+ struct musb_request *r;
void __iomem *regs = musb->control_ep->regs;
musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
@@ -550,7 +607,7 @@ musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
/* NOTE: earlier 2.6 versions changed setup packets to host
* order, but now USB packets always stay in USB byte order.
*/
- DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+ dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n",
req->bRequestType,
req->bRequest,
le16_to_cpu(req->wValue),
@@ -560,7 +617,7 @@ musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
/* clean up any leftover transfers */
r = next_ep0_request(musb);
if (r)
- musb_g_ep0_giveback(musb, r);
+ musb_g_ep0_giveback(musb, &r->request);
/* For zero-data requests we want to delay the STATUS stage to
* avoid SETUPEND errors. If we read data (OUT), delay accepting
@@ -618,17 +675,23 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
csr = musb_readw(regs, MUSB_CSR0);
len = musb_readb(regs, MUSB_COUNT0);
- DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
- csr, len,
- musb_readb(mbase, MUSB_FADDR),
- decode_ep0stage(musb->ep0_state));
+ dev_dbg(musb->controller, "csr %04x, count %d, ep0stage %s\n",
+ csr, len, decode_ep0stage(musb->ep0_state));
+
+ if (csr & MUSB_CSR0_P_DATAEND) {
+ /*
+ * If DATAEND is set we should not call the callback,
+ * hence the status stage is not complete.
+ */
+ return IRQ_HANDLED;
+ }
/* I sent a stall.. need to acknowledge it now.. */
if (csr & MUSB_CSR0_P_SENTSTALL) {
musb_writew(regs, MUSB_CSR0,
csr & ~MUSB_CSR0_P_SENTSTALL);
retval = IRQ_HANDLED;
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
csr = musb_readw(regs, MUSB_CSR0);
}
@@ -636,7 +699,18 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
if (csr & MUSB_CSR0_P_SETUPEND) {
musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
retval = IRQ_HANDLED;
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ /* Transition into the early status phase */
+ switch (musb->ep0_state) {
+ case MUSB_EP0_STAGE_TX:
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+ break;
+ case MUSB_EP0_STAGE_RX:
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ break;
+ default:
+ ERR("SetupEnd came in a wrong ep0stage %s\n",
+ decode_ep0stage(musb->ep0_state));
+ }
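/*
 * Illustrative sketch, not part of the patch: the SetupEnd recovery mapping
 * implemented by the switch above (hypothetical helper).  An aborted IN data
 * stage moves to the STATUS OUT phase and an aborted OUT data stage to
 * STATUS IN; any other stage is unexpected and only logged.
 */
static int ep0_stage_after_setupend(int stage)
{
	if (stage == MUSB_EP0_STAGE_TX)
		return MUSB_EP0_STAGE_STATUSOUT;
	if (stage == MUSB_EP0_STAGE_RX)
		return MUSB_EP0_STAGE_STATUSIN;
	return stage;
}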
csr = musb_readw(regs, MUSB_CSR0);
/* NOTE: request may need completion */
}
@@ -678,7 +752,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
/* enter test mode if needed (exit by reset) */
else if (musb->test_mode) {
- DBG(1, "entering TESTMODE\n");
+ dev_dbg(musb->controller, "entering TESTMODE\n");
if (MUSB_TEST_PACKET == musb->test_mode_nr)
musb_load_testpacket(musb);
@@ -691,17 +765,37 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
case MUSB_EP0_STAGE_STATUSOUT:
/* end of sequence #1: write to host (TX state) */
{
- struct usb_request *req;
+ struct musb_request *req;
req = next_ep0_request(musb);
if (req)
- musb_g_ep0_giveback(musb, req);
+ musb_g_ep0_giveback(musb, &req->request);
}
+
+ /*
+ * In case when several interrupts can get coalesced,
+ * check to see if we've already received a SETUP packet...
+ */
+ if (csr & MUSB_CSR0_RXPKTRDY)
+ goto setup;
+
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
+ break;
+
+ case MUSB_EP0_STAGE_IDLE:
+ /*
+ * This state is typically (but not always) indiscernible
+ * from the status states since the corresponding interrupts
+	 * tend to happen within too short a period of time (with only
+ * a zero-length packet in between) and so get coalesced...
+ */
retval = IRQ_HANDLED;
musb->ep0_state = MUSB_EP0_STAGE_SETUP;
/* FALLTHROUGH */
case MUSB_EP0_STAGE_SETUP:
+setup:
if (csr & MUSB_CSR0_RXPKTRDY) {
struct usb_ctrlrequest setup;
int handled = 0;
@@ -737,12 +831,18 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
handled = service_zero_data_request(
musb, &setup);
+ /*
+ * We're expecting no data in any case, so
+ * always set the DATAEND bit -- doing this
+ * here helps avoid SetupEnd interrupt coming
+ * in the idle stage when we're stalling...
+ */
+ musb->ackpend |= MUSB_CSR0_P_DATAEND;
+
/* status stage might be immediate */
- if (handled > 0) {
- musb->ackpend |= MUSB_CSR0_P_DATAEND;
+ if (handled > 0)
musb->ep0_state =
MUSB_EP0_STAGE_STATUSIN;
- }
break;
/* sequence #1 (IN to host), includes GET_STATUS
@@ -764,7 +864,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
break;
}
- DBG(3, "handled %d, csr %04x, ep0stage %s\n",
+ dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n",
handled, csr,
decode_ep0stage(musb->ep0_state));
@@ -781,9 +881,9 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
if (handled < 0) {
musb_ep_select(mbase, 0);
stall:
- DBG(3, "stall (%d)\n", handled);
+ dev_dbg(musb->controller, "stall (%d)\n", handled);
musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
musb_writew(regs, MUSB_CSR0,
musb->ackpend);
@@ -803,7 +903,7 @@ finish:
/* "can't happen" */
WARN_ON(1);
musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
break;
}
@@ -861,16 +961,16 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
status = 0;
break;
default:
- DBG(1, "ep0 request queued in state %d\n",
+ dev_dbg(musb->controller, "ep0 request queued in state %d\n",
musb->ep0_state);
status = -EINVAL;
goto cleanup;
}
/* add request to the list */
- list_add_tail(&(req->request.list), &(ep->req_list));
+ list_add_tail(&req->list, &ep->req_list);
- DBG(3, "queue to %s (%s), length=%d\n",
+ dev_dbg(musb->controller, "queue to %s (%s), length=%d\n",
ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
req->request.length);
@@ -959,11 +1059,11 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value)
csr |= MUSB_CSR0_P_SENDSTALL;
musb_writew(regs, MUSB_CSR0, csr);
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ep0_state = MUSB_EP0_STAGE_IDLE;
musb->ackpend = 0;
break;
default:
- DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
+ dev_dbg(musb->controller, "ep0 can't halt in state %d\n", musb->ep0_state);
status = -EINVAL;
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 6dbbd0786a6..eb06291a40c 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -4,6 +4,7 @@
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -38,13 +39,12 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
+#include <linux/dma-mapping.h>
#include "musb_core.h"
#include "musb_host.h"
-
/* MUSB HOST status 22-mar-2006
*
* - There's still lots of partial code duplication for fault paths, so
@@ -64,11 +64,8 @@
*
* - DMA (Mentor/OMAP) ...has at least toggle update problems
*
- * - Still no traffic scheduling code to make NAKing for bulk or control
- * transfers unable to starve other requests; or to make efficient use
- * of hardware with periodic transfers. (Note that network drivers
- * commonly post bulk reads that stay pending for a long time; these
- * would make very visible trouble.)
+ * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
+ * starvation ... nothing yet for TX, interrupt, or bulk.
*
* - Not tested with HNP, but some SRP paths seem to behave.
*
@@ -88,11 +85,8 @@
*
* CONTROL transfers all go through ep0. BULK ones go through dedicated IN
* and OUT endpoints ... hardware is dedicated for those "async" queue(s).
- *
* (Yes, bulk _could_ use more of the endpoints than that, and would even
- * benefit from it ... one remote device may easily be NAKing while others
- * need to perform transfers in that same direction. The same thing could
- * be done in software though, assuming dma cooperates.)
+ * benefit from it.)
*
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
* So far that scheduling is both dumb and optimistic: the endpoint will be
@@ -100,16 +94,22 @@
* of transfers between endpoints, or anything clever.
*/
+struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+ return *(struct musb **) hcd->hcd_priv;
+}
+
static void musb_ep_program(struct musb *musb, u8 epnum,
- struct urb *urb, unsigned int nOut,
- u8 *buf, u32 len);
+ struct urb *urb, int is_out,
+ u8 *buf, u32 offset, u32 len);
/*
* Clear TX fifo. Needed to avoid BABBLE errors.
*/
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
+ struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
u16 csr;
u16 lastcsr = 0;
@@ -118,7 +118,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
csr = musb_readw(epio, MUSB_TXCSR);
while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
if (csr != lastcsr)
- DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+ dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
lastcsr = csr;
csr |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(epio, MUSB_TXCSR, csr);
@@ -131,6 +131,29 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
}
}
+static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
+{
+ void __iomem *epio = ep->regs;
+ u16 csr;
+ int retries = 5;
+
+ /* scrub any data left in the fifo */
+ do {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
+ break;
+ musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ udelay(10);
+ } while (--retries);
+
+ WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
+ ep->epnum, csr);
+
+ /* and reset for the next transfer */
+ musb_writew(epio, MUSB_TXCSR, 0);
+}
+
/*
* Start transmit. Caller is responsible for locking shared resources.
* musb must be locked.
@@ -151,16 +174,31 @@ static inline void musb_h_tx_start(struct musb_hw_ep *ep)
}
-static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
+static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
u16 txcsr;
/* NOTE: no locks here; caller should lock and select EP */
txcsr = musb_readw(ep->regs, MUSB_TXCSR);
txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+ if (is_cppi_enabled())
+ txcsr |= MUSB_TXCSR_DMAMODE;
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
+static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
+{
+ if (is_in != 0 || ep->is_shared_fifo)
+ ep->in_qh = qh;
+ if (is_in == 0 || ep->is_shared_fifo)
+ ep->out_qh = qh;
+}
+
+static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
+{
+ return is_in ? ep->in_qh : ep->out_qh;
+}
+
/*
* Start the URB at the front of an endpoint's queue
* end must be claimed from the caller.
@@ -172,9 +210,10 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
u16 frame;
u32 len;
- void *buf;
void __iomem *mbase = musb->mregs;
struct urb *urb = next_urb(qh);
+ void *buf = urb->transfer_buffer;
+ u32 offset = 0;
struct musb_hw_ep *hw_ep = qh->hw_ep;
unsigned pipe = urb->pipe;
u8 address = usb_pipedevice(pipe);
@@ -189,7 +228,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
case USB_ENDPOINT_XFER_CONTROL:
/* control transfers always start with SETUP */
is_in = 0;
- hw_ep->out_qh = qh;
musb->ep0_stage = MUSB_EP0_START;
buf = urb->setup_packet;
len = 8;
@@ -197,15 +235,16 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
case USB_ENDPOINT_XFER_ISOC:
qh->iso_idx = 0;
qh->frame = 0;
- buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
+ offset = urb->iso_frame_desc[0].offset;
len = urb->iso_frame_desc[0].length;
break;
default: /* bulk, interrupt */
- buf = urb->transfer_buffer;
- len = urb->transfer_buffer_length;
+ /* actual_length may be nonzero on retry paths */
+ buf = urb->transfer_buffer + urb->actual_length;
+ len = urb->transfer_buffer_length - urb->actual_length;
}
- DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+ dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
qh, urb, address, qh->epnum,
is_in ? "in" : "out",
({char *s; switch (qh->type) {
@@ -213,15 +252,12 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
default: s = "-intr"; break;
- }; s; }),
- epnum, buf, len);
+ } s; }),
+ epnum, buf + offset, len);
/* Configure endpoint */
- if (is_in || hw_ep->is_shared_fifo)
- hw_ep->in_qh = qh;
- else
- hw_ep->out_qh = qh;
- musb_ep_program(musb, epnum, urb, !is_in, buf, len);
+ musb_ep_set_qh(hw_ep, is_in, qh);
+ musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
/* transmit may have more work: start it when it is time */
if (is_in)
@@ -231,14 +267,12 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
switch (qh->type) {
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
- DBG(3, "check whether there's still time for periodic Tx\n");
- qh->iso_idx = 0;
+ dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
frame = musb_readw(mbase, MUSB_FRAME);
/* FIXME this doesn't implement that scheduling policy ...
* or handle framecounter wrapping
*/
- if ((urb->transfer_flags & URB_ISO_ASAP)
- || (frame >= urb->start_frame)) {
+ if (1) { /* Always assume URB_ISO_ASAP */
/* REVISIT the SOF irq handler shouldn't duplicate
* this code; and we don't init urb->start_frame...
*/
@@ -247,7 +281,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
} else {
qh->frame = urb->start_frame;
/* enable SOF interrupt so we can count down */
- DBG(1, "SOF for %d\n", epnum);
+ dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
@@ -255,37 +289,22 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
break;
default:
start:
- DBG(4, "Start TX%d %s\n", epnum,
+ dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
hw_ep->tx_channel ? "dma" : "pio");
if (!hw_ep->tx_channel)
musb_h_tx_start(hw_ep);
else if (is_cppi_enabled() || tusb_dma_omap())
- cppi_host_txdma_start(hw_ep);
+ musb_h_tx_dma_start(hw_ep);
}
}
-/* caller owns controller lock, irqs are blocked */
-static void
-__musb_giveback(struct musb *musb, struct urb *urb, int status)
+/* Context: caller owns controller lock, IRQs are blocked */
+static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
- DBG(({ int level; switch (status) {
- case 0:
- level = 4;
- break;
- /* common/boring faults */
- case -EREMOTEIO:
- case -ESHUTDOWN:
- case -ECONNRESET:
- case -EPIPE:
- level = 3;
- break;
- default:
- level = 2;
- break;
- }; level; }),
+ dev_dbg(musb->controller,
"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
urb, urb->complete, status,
usb_pipedevice(urb->pipe),
@@ -294,57 +313,54 @@ __acquires(musb->lock)
urb->actual_length, urb->transfer_buffer_length
);
- usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+ usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
spin_unlock(&musb->lock);
- usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+ usb_hcd_giveback_urb(musb->hcd, urb, status);
spin_lock(&musb->lock);
}
-/* for bulk/interrupt endpoints only */
-static inline void
-musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+/* For bulk/interrupt endpoints only */
+static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
+ struct urb *urb)
{
- struct usb_device *udev = urb->dev;
+ void __iomem *epio = qh->hw_ep->regs;
u16 csr;
- void __iomem *epio = ep->regs;
- struct musb_qh *qh;
- /* FIXME: the current Mentor DMA code seems to have
+ /*
+ * FIXME: the current Mentor DMA code seems to have
* problems getting toggle correct.
*/
- if (is_in || ep->is_shared_fifo)
- qh = ep->in_qh;
+ if (is_in)
+ csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
else
- qh = ep->out_qh;
+ csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
- if (!is_in) {
- csr = musb_readw(epio, MUSB_TXCSR);
- usb_settoggle(udev, qh->epnum, 1,
- (csr & MUSB_TXCSR_H_DATATOGGLE)
- ? 1 : 0);
- } else {
- csr = musb_readw(epio, MUSB_RXCSR);
- usb_settoggle(udev, qh->epnum, 0,
- (csr & MUSB_RXCSR_H_DATATOGGLE)
- ? 1 : 0);
- }
+ usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
-/* caller owns controller lock, irqs are blocked */
-static struct musb_qh *
-musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+/*
+ * Advance this hardware endpoint's queue, completing the specified URB and
+ * advancing to either the next URB queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, IRQs are blocked
+ */
+static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+ struct musb_hw_ep *hw_ep, int is_in)
{
+ struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
struct musb_hw_ep *ep = qh->hw_ep;
- struct musb *musb = ep->musb;
- int is_in = usb_pipein(urb->pipe);
int ready = qh->is_ready;
+ int status;
+
+ status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
/* save toggle eagerly, for paranoia */
switch (qh->type) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
- musb_save_toggle(ep, is_in, urb);
+ musb_save_toggle(qh, is_in, urb);
break;
case USB_ENDPOINT_XFER_ISOC:
if (status == 0 && urb->error_count)
@@ -353,7 +369,7 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
}
qh->is_ready = 0;
- __musb_giveback(musb, urb, status);
+ musb_giveback(musb, urb, status);
qh->is_ready = ready;
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
@@ -361,17 +377,24 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
*/
if (list_empty(&qh->hep->urb_list)) {
struct list_head *head;
+ struct dma_controller *dma = musb->dma_controller;
- if (is_in)
+ if (is_in) {
ep->rx_reinit = 1;
- else
+ if (ep->rx_channel) {
+ dma->channel_release(ep->rx_channel);
+ ep->rx_channel = NULL;
+ }
+ } else {
ep->tx_reinit = 1;
+ if (ep->tx_channel) {
+ dma->channel_release(ep->tx_channel);
+ ep->tx_channel = NULL;
+ }
+ }
- /* clobber old pointers to this qh */
- if (is_in || ep->is_shared_fifo)
- ep->in_qh = NULL;
- else
- ep->out_qh = NULL;
+ /* Clobber old pointers to this qh */
+ musb_ep_set_qh(ep, is_in, NULL);
qh->hep->hcpriv = NULL;
switch (qh->type) {
@@ -395,42 +418,15 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
* de-allocated if it's tracked and allocated;
* and where we'd update the schedule tree...
*/
- musb->periodic[ep->epnum] = NULL;
kfree(qh);
qh = NULL;
break;
}
}
- return qh;
-}
-
-/*
- * Advance this hardware endpoint's queue, completing the specified urb and
- * advancing to either the next urb queued to that qh, or else invalidating
- * that qh and advancing to the next qh scheduled after the current one.
- *
- * Context: caller owns controller lock, irqs are blocked
- */
-static void
-musb_advance_schedule(struct musb *musb, struct urb *urb,
- struct musb_hw_ep *hw_ep, int is_in)
-{
- struct musb_qh *qh;
-
- if (is_in || hw_ep->is_shared_fifo)
- qh = hw_ep->in_qh;
- else
- qh = hw_ep->out_qh;
-
- if (urb->status == -EINPROGRESS)
- qh = musb_giveback(qh, urb, 0);
- else
- qh = musb_giveback(qh, urb, urb->status);
if (qh != NULL && qh->is_ready) {
- DBG(4, "... next ep%d %cX urb %p\n",
- hw_ep->epnum, is_in ? 'R' : 'T',
- next_urb(qh));
+ dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
+ hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
}
@@ -474,7 +470,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
/* musb_ep_select(mbase, epnum); */
rx_count = musb_readw(epio, MUSB_RXCOUNT);
- DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+ dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
urb->transfer_buffer, qh->offset,
urb->transfer_buffer_length);
@@ -496,7 +492,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
status = -EOVERFLOW;
urb->error_count++;
}
- DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
do_flush = 1;
} else
length = rx_count;
@@ -514,7 +510,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
if (rx_count > length) {
if (urb->status == -EINPROGRESS)
urb->status = -EOVERFLOW;
- DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
do_flush = 1;
} else
length = rx_count;
@@ -573,10 +569,17 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
csr = musb_readw(ep->regs, MUSB_TXCSR);
if (csr & MUSB_TXCSR_MODE) {
musb_h_tx_flush_fifo(ep);
+ csr = musb_readw(ep->regs, MUSB_TXCSR);
musb_writew(ep->regs, MUSB_TXCSR,
- MUSB_TXCSR_FRCDATATOG);
+ csr | MUSB_TXCSR_FRCDATATOG);
}
- /* clear mode (and everything else) to enable Rx */
+
+ /*
+ * Clear the MODE bit (and everything else) to enable Rx.
+ * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
+ */
+ if (csr & MUSB_TXCSR_DMAMODE)
+ musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
musb_writew(ep->regs, MUSB_TXCSR, 0);
/* scrub all previous state, clearing toggle */
@@ -602,19 +605,97 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
/* NOTE: bulk combining rewrites high bits of maxpacket */
- musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffer mode.
+ */
+ if (musb->double_buffer_not_ok)
+ musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
+ else
+ musb_writew(ep->regs, MUSB_RXMAXP,
+ qh->maxpacket | ((qh->hb_mult - 1) << 11));
ep->rx_reinit = 0;
}
+static bool musb_tx_dma_program(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep, struct musb_qh *qh,
+ struct urb *urb, u32 offset, u32 length)
+{
+ struct dma_channel *channel = hw_ep->tx_channel;
+ void __iomem *epio = hw_ep->regs;
+ u16 pkt_size = qh->maxpacket;
+ u16 csr;
+ u8 mode;
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
+ if (length > channel->max_len)
+ length = channel->max_len;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (length > pkt_size) {
+ mode = 1;
+ csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
+ /* autoset shouldn't be set in high bandwidth */
+ /*
+ * Enable Autoset according to table
+ * below
+ * bulk_split hb_mult Autoset_Enable
+ * 0 1 Yes(Normal)
+ * 0 >1 No(High BW ISO)
+ * 1 1 Yes(HS bulk)
+ * 1 >1 Yes(FS bulk)
+ */
+ if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
+ can_bulk_split(hw_ep->musb, qh->type)))
+ csr |= MUSB_TXCSR_AUTOSET;
+ } else {
+ mode = 0;
+ csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
+ csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
+ }
+ channel->desired_mode = mode;
+ musb_writew(epio, MUSB_TXCSR, csr);
+#else
+ if (!is_cppi_enabled() && !tusb_dma_omap())
+ return false;
+
+ channel->actual_len = 0;
+
+ /*
+ * TX uses "RNDIS" mode automatically but needs help
+ * to identify the zero-length-final-packet case.
+ */
+ mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
+#endif
+
+ qh->segsize = length;
+
+ /*
+ * Ensure the data reaches main memory before starting
+ * the DMA transfer
+ */
+ wmb();
+
+ if (!dma->channel_program(channel, pkt_size, mode,
+ urb->transfer_dma + offset, length)) {
+ dma->channel_release(channel);
+ hw_ep->tx_channel = NULL;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
+ musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
+ return false;
+ }
+ return true;
+}
/*
* Program an HDRC endpoint as per the given URB
* Context: irqs blocked, controller lock held
*/
static void musb_ep_program(struct musb *musb, u8 epnum,
- struct urb *urb, unsigned int is_out,
- u8 *buf, u32 len)
+ struct urb *urb, int is_out,
+ u8 *buf, u32 offset, u32 len)
{
struct dma_controller *dma_controller;
struct dma_channel *dma_channel;
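
musb_tx_dma_program() above picks DMA mode 1 (multi-packet) whenever the request is longer than one packet, and enables AUTOSET per the bulk_split/hb_mult table in its comment. A standalone sketch of just that decision, with hypothetical names and plain integers instead of CSR registers:

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the mode/AUTOSET choice made by musb_tx_dma_program():
 * mode 1 for transfers longer than one packet, and AUTOSET unless the
 * endpoint carries high-bandwidth ISO (hb_mult > 1 without bulk split).
 * The helper name and arguments are illustrative, not the driver's API.
 */
static void choose_tx_dma_mode(unsigned length, unsigned pkt_size,
			       unsigned hb_mult, bool can_bulk_split,
			       int *mode, bool *autoset)
{
	if (length > pkt_size) {
		*mode = 1;
		*autoset = (hb_mult == 1) || can_bulk_split;
	} else {
		*mode = 0;
		*autoset = false;
	}
}

int main(void)
{
	int mode;
	bool autoset;

	choose_tx_dma_mode(4096, 512, 1, false, &mode, &autoset);
	printf("bulk 4096/512:       mode %d, autoset %d\n", mode, autoset);

	choose_tx_dma_mode(3072, 1024, 3, false, &mode, &autoset);
	printf("HB ISO 3072/1024x3:  mode %d, autoset %d\n", mode, autoset);
	return 0;
}
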
@@ -622,17 +703,12 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
void __iomem *mbase = musb->mregs;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
- struct musb_qh *qh;
- u16 packet_sz;
-
- if (!is_out || hw_ep->is_shared_fifo)
- qh = hw_ep->in_qh;
- else
- qh = hw_ep->out_qh;
-
- packet_sz = qh->maxpacket;
+ struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
+ u16 packet_sz = qh->maxpacket;
+ u8 use_dma = 1;
+ u16 csr;
- DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
"h_addr%02x h_port%02x bytes %d\n",
is_out ? "-->" : "<--",
epnum, urb, urb->dev->speed,
@@ -642,9 +718,17 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
musb_ep_select(mbase, epnum);
+ if (is_out && !len) {
+ use_dma = 0;
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ hw_ep->tx_channel = NULL;
+ }
+
/* candidate for DMA? */
dma_controller = musb->dma_controller;
- if (is_dma_capable() && epnum && dma_controller) {
+ if (use_dma && is_dma_capable() && epnum && dma_controller) {
dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
if (!dma_channel) {
dma_channel = dma_controller->channel_alloc(
@@ -668,17 +752,28 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
csr = musb_readw(epio, MUSB_TXCSR);
/* disable interrupt in case we flush */
- int_txe = musb_readw(mbase, MUSB_INTRTXE);
+ int_txe = musb->intrtxe;
musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
/* general endpoint setup */
if (epnum) {
- /* ASSERT: TXCSR_DMAENAB was already cleared */
-
/* flush all old state, set default */
- musb_h_tx_flush_fifo(hw_ep);
+ /*
+ * We could be flushing valid
+ * packets in double buffering
+ * case
+ */
+ if (!hw_ep->tx_double_buffered)
+ musb_h_tx_flush_fifo(hw_ep);
+
+ /*
+ * We must not clear the DMAMODE bit before or in
+ * the same cycle with the DMAENAB bit, so we clear
+ * the latter first...
+ */
csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
- | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_FRCDATATOG
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_H_ERROR
@@ -686,24 +781,22 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
);
csr |= MUSB_TXCSR_MODE;
- if (usb_gettoggle(urb->dev,
- qh->epnum, 1))
- csr |= MUSB_TXCSR_H_WR_DATATOGGLE
- | MUSB_TXCSR_H_DATATOGGLE;
- else
- csr |= MUSB_TXCSR_CLRDATATOG;
+ if (!hw_ep->tx_double_buffered) {
+ if (usb_gettoggle(urb->dev, qh->epnum, 1))
+ csr |= MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr |= MUSB_TXCSR_CLRDATATOG;
+ }
- /* twice in case of double packet buffering */
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
+ csr &= ~MUSB_TXCSR_DMAMODE;
musb_writew(epio, MUSB_TXCSR, csr);
csr = musb_readw(epio, MUSB_TXCSR);
} else {
/* endpoint 0: just flush */
- musb_writew(epio, MUSB_CSR0,
- csr | MUSB_CSR0_FLUSHFIFO);
- musb_writew(epio, MUSB_CSR0,
- csr | MUSB_CSR0_FLUSHFIFO);
+ musb_h_ep0_flush_fifo(hw_ep);
}
/* target addr and (for multipoint) hub addr/port */
@@ -718,14 +811,19 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* protocol/endpoint/interval/NAKlimit */
if (epnum) {
musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
- if (can_bulk_split(musb, qh->type))
+ if (musb->double_buffer_not_ok) {
musb_writew(epio, MUSB_TXMAXP,
- packet_sz
- | ((hw_ep->max_packet_sz_tx /
- packet_sz) - 1) << 11);
- else
+ hw_ep->max_packet_sz_tx);
+ } else if (can_bulk_split(musb, qh->type)) {
+ qh->hb_mult = hw_ep->max_packet_sz_tx
+ / packet_sz;
+ musb_writew(epio, MUSB_TXMAXP, packet_sz
+ | ((qh->hb_mult) - 1) << 11);
+ } else {
musb_writew(epio, MUSB_TXMAXP,
- packet_sz);
+ qh->maxpacket |
+ ((qh->hb_mult - 1) << 11));
+ }
musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
} else {
musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
@@ -740,115 +838,35 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
else
load_count = min((u32) packet_sz, len);
-#ifdef CONFIG_USB_INVENTRA_DMA
- if (dma_channel) {
-
- /* clear previous state */
- csr = musb_readw(epio, MUSB_TXCSR);
- csr &= ~(MUSB_TXCSR_AUTOSET
- | MUSB_TXCSR_DMAMODE
- | MUSB_TXCSR_DMAENAB);
- csr |= MUSB_TXCSR_MODE;
- musb_writew(epio, MUSB_TXCSR,
- csr | MUSB_TXCSR_MODE);
-
- qh->segsize = min(len, dma_channel->max_len);
-
- if (qh->segsize <= packet_sz)
- dma_channel->desired_mode = 0;
- else
- dma_channel->desired_mode = 1;
-
-
- if (dma_channel->desired_mode == 0) {
- csr &= ~(MUSB_TXCSR_AUTOSET
- | MUSB_TXCSR_DMAMODE);
- csr |= (MUSB_TXCSR_DMAENAB);
- /* against programming guide */
- } else
- csr |= (MUSB_TXCSR_AUTOSET
- | MUSB_TXCSR_DMAENAB
- | MUSB_TXCSR_DMAMODE);
-
- musb_writew(epio, MUSB_TXCSR, csr);
-
- dma_ok = dma_controller->channel_program(
- dma_channel, packet_sz,
- dma_channel->desired_mode,
- urb->transfer_dma,
- qh->segsize);
- if (dma_ok) {
- load_count = 0;
- } else {
- dma_controller->channel_release(dma_channel);
- if (is_out)
- hw_ep->tx_channel = NULL;
- else
- hw_ep->rx_channel = NULL;
- dma_channel = NULL;
- }
- }
-#endif
-
- /* candidate for DMA */
- if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
-
- /* program endpoint CSRs first, then setup DMA.
- * assume CPPI setup succeeds.
- * defer enabling dma.
- */
- csr = musb_readw(epio, MUSB_TXCSR);
- csr &= ~(MUSB_TXCSR_AUTOSET
- | MUSB_TXCSR_DMAMODE
- | MUSB_TXCSR_DMAENAB);
- csr |= MUSB_TXCSR_MODE;
- musb_writew(epio, MUSB_TXCSR,
- csr | MUSB_TXCSR_MODE);
-
- dma_channel->actual_len = 0L;
- qh->segsize = len;
-
- /* TX uses "rndis" mode automatically, but needs help
- * to identify the zero-length-final-packet case.
- */
- dma_ok = dma_controller->channel_program(
- dma_channel, packet_sz,
- (urb->transfer_flags
- & URB_ZERO_PACKET)
- == URB_ZERO_PACKET,
- urb->transfer_dma,
- qh->segsize);
- if (dma_ok) {
- load_count = 0;
- } else {
- dma_controller->channel_release(dma_channel);
- hw_ep->tx_channel = NULL;
- dma_channel = NULL;
-
- /* REVISIT there's an error path here that
- * needs handling: can't do dma, but
- * there's no pio buffer address...
- */
- }
- }
+ if (dma_channel && musb_tx_dma_program(dma_controller,
+ hw_ep, qh, urb, offset, len))
+ load_count = 0;
if (load_count) {
- /* ASSERT: TXCSR_DMAENAB was already cleared */
-
/* PIO to load FIFO */
qh->segsize = load_count;
- musb_write_fifo(hw_ep, load_count, buf);
- csr = musb_readw(epio, MUSB_TXCSR);
- csr &= ~(MUSB_TXCSR_DMAENAB
- | MUSB_TXCSR_DMAMODE
- | MUSB_TXCSR_AUTOSET);
- /* write CSR */
- csr |= MUSB_TXCSR_MODE;
-
- if (epnum)
- musb_writew(epio, MUSB_TXCSR, csr);
+ if (!buf) {
+ sg_miter_start(&qh->sg_miter, urb->sg, 1,
+ SG_MITER_ATOMIC
+ | SG_MITER_FROM_SG);
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller,
+ "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ goto finish;
+ }
+ buf = qh->sg_miter.addr + urb->sg->offset +
+ urb->actual_length;
+ load_count = min_t(u32, load_count,
+ qh->sg_miter.length);
+ musb_write_fifo(hw_ep, load_count, buf);
+ qh->sg_miter.consumed = load_count;
+ sg_miter_stop(&qh->sg_miter);
+ } else
+ musb_write_fifo(hw_ep, load_count, buf);
}
-
+finish:
/* re-enable interrupt */
musb_writew(mbase, MUSB_INTRTXE, int_txe);
@@ -884,42 +902,104 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* kick things off */
if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
- /* candidate for DMA */
- if (dma_channel) {
- dma_channel->actual_len = 0L;
- qh->segsize = len;
-
- /* AUTOREQ is in a DMA register */
- musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
- csr = musb_readw(hw_ep->regs,
- MUSB_RXCSR);
-
- /* unless caller treats short rx transfers as
- * errors, we dare not queue multiple transfers.
- */
- dma_ok = dma_controller->channel_program(
- dma_channel, packet_sz,
- !(urb->transfer_flags
- & URB_SHORT_NOT_OK),
- urb->transfer_dma,
- qh->segsize);
- if (!dma_ok) {
- dma_controller->channel_release(
- dma_channel);
- hw_ep->rx_channel = NULL;
- dma_channel = NULL;
- } else
- csr |= MUSB_RXCSR_DMAENAB;
- }
+ /* Candidate for DMA */
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* AUTOREQ is in a DMA register */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+ /*
+ * Unless caller treats short RX transfers as
+ * errors, we dare not queue multiple transfers.
+ */
+ dma_ok = dma_controller->channel_program(dma_channel,
+ packet_sz, !(urb->transfer_flags &
+ URB_SHORT_NOT_OK),
+ urb->transfer_dma + offset,
+ qh->segsize);
+ if (!dma_ok) {
+ dma_controller->channel_release(dma_channel);
+ hw_ep->rx_channel = dma_channel = NULL;
+ } else
+ csr |= MUSB_RXCSR_DMAENAB;
}
csr |= MUSB_RXCSR_H_REQPKT;
- DBG(7, "RXCSR%d := %04x\n", epnum, csr);
+ dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
}
}
+/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+ int is_in)
+{
+ struct dma_channel *dma;
+ struct urb *urb;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *epio = ep->regs;
+ struct musb_qh *cur_qh, *next_qh;
+ u16 rx_csr, tx_csr;
+
+ musb_ep_select(mbase, ep->epnum);
+ if (is_in) {
+ dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+ /* clear nak timeout bit */
+ rx_csr = musb_readw(epio, MUSB_RXCSR);
+ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+ cur_qh = first_qh(&musb->in_bulk);
+ } else {
+ dma = is_dma_capable() ? ep->tx_channel : NULL;
+
+ /* clear nak timeout bit */
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+ tx_csr |= MUSB_TXCSR_H_WZC_BITS;
+ tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+
+ cur_qh = first_qh(&musb->out_bulk);
+ }
+ if (cur_qh) {
+ urb = next_urb(cur_qh);
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ urb->actual_length += dma->actual_len;
+ dma->actual_len = 0L;
+ }
+ musb_save_toggle(cur_qh, is_in, urb);
+
+ if (is_in) {
+ /* move cur_qh to end of queue */
+ list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+ /* get the next qh from musb->in_bulk */
+ next_qh = first_qh(&musb->in_bulk);
+
+ /* set rx_reinit and schedule the next qh */
+ ep->rx_reinit = 1;
+ } else {
+ /* move cur_qh to end of queue */
+ list_move_tail(&cur_qh->ring, &musb->out_bulk);
+
+ /* get the next qh from musb->out_bulk */
+ next_qh = first_qh(&musb->out_bulk);
+
+ /* set tx_reinit and schedule the next qh */
+ ep->tx_reinit = 1;
+ }
+ musb_start_urb(musb, is_in, next_qh);
+ }
+}
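
musb_bulk_nak_timeout() above implements round-robin fairness: when the active bulk qh keeps NAKing, it is moved to the tail of musb->in_bulk or musb->out_bulk and the new head gets the shared bulk endpoint. A toy user-space model of that rotation, using an array instead of the kernel's list_move_tail() and illustrative names:

#include <stdio.h>

#define NQH 3

/*
 * Toy model: qh[0] is the currently scheduled queue head.  On a NAK
 * timeout the current entry rotates to the tail, so a device that keeps
 * NAKing cannot starve the others sharing the bulk endpoint.
 */
static void bulk_nak_timeout(const char *qh[], int n)
{
	const char *cur = qh[0];
	int i;

	for (i = 0; i < n - 1; i++)
		qh[i] = qh[i + 1];	/* everyone else moves up ...      */
	qh[n - 1] = cur;		/* ... current qh goes to the tail */

	printf("NAK timeout on %s, now scheduling %s\n", cur, qh[0]);
}

int main(void)
{
	const char *bulk_in[NQH] = { "usbnet-rx", "serial-rx", "storage-rx" };

	bulk_nak_timeout(bulk_in, NQH);	/* usbnet-rx  -> tail */
	bulk_nak_timeout(bulk_in, NQH);	/* serial-rx  -> tail */
	return 0;
}
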
/*
* Service the default endpoint (ep0) as host.
@@ -957,15 +1037,15 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
request = (struct usb_ctrlrequest *) urb->setup_packet;
if (!request->wLength) {
- DBG(4, "start no-DATA\n");
+ dev_dbg(musb->controller, "start no-DATA\n");
break;
} else if (request->bRequestType & USB_DIR_IN) {
- DBG(4, "start IN-DATA\n");
+ dev_dbg(musb->controller, "start IN-DATA\n");
musb->ep0_stage = MUSB_EP0_IN;
more = true;
break;
} else {
- DBG(4, "start OUT-DATA\n");
+ dev_dbg(musb->controller, "start OUT-DATA\n");
musb->ep0_stage = MUSB_EP0_OUT;
more = true;
}
@@ -977,7 +1057,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
if (fifo_count) {
fifo_dest = (u8 *) (urb->transfer_buffer
+ urb->actual_length);
- DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
+ dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
fifo_count,
(fifo_count == 1) ? "" : "s",
fifo_dest);
@@ -1022,7 +1102,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
? musb_readb(epio, MUSB_COUNT0)
: 0;
- DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+ dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
csr, qh, len, urb, musb->ep0_stage);
/* if we just did status stage, we are done */
@@ -1033,19 +1113,20 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
/* prepare status */
if (csr & MUSB_CSR0_H_RXSTALL) {
- DBG(6, "STALLING ENDPOINT\n");
+ dev_dbg(musb->controller, "STALLING ENDPOINT\n");
status = -EPIPE;
} else if (csr & MUSB_CSR0_H_ERROR) {
- DBG(2, "no response, csr0 %04x\n", csr);
+ dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
status = -EPROTO;
} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
- DBG(2, "control NAK timeout\n");
+ dev_dbg(musb->controller, "control NAK timeout\n");
/* NOTE: this code path would be a good place to PAUSE a
* control transfer, if another one is queued, so that
- * ep0 is more likely to stay busy.
+ * ep0 is more likely to stay busy. That's already done
+ * for bulk RX transfers.
*
* if (qh->ring.next != &musb->control), then
* we have a candidate... NAKing is *NOT* an error
@@ -1055,7 +1136,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
}
if (status) {
- DBG(6, "aborting\n");
+ dev_dbg(musb->controller, "aborting\n");
retval = IRQ_HANDLED;
if (urb)
urb->status = status;
@@ -1068,11 +1149,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
musb_writew(epio, MUSB_CSR0, csr);
} else {
- csr |= MUSB_CSR0_FLUSHFIFO;
- musb_writew(epio, MUSB_CSR0, csr);
- musb_writew(epio, MUSB_CSR0, csr);
- csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
- musb_writew(epio, MUSB_CSR0, csr);
+ musb_h_ep0_flush_fifo(hw_ep);
}
musb_writeb(epio, MUSB_NAKLIMIT0, 0);
@@ -1086,10 +1163,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
* SHOULD NEVER HAPPEN! */
ERR("no URB for end 0\n");
- musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
- musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
- musb_writew(epio, MUSB_CSR0, 0);
-
+ musb_h_ep0_flush_fifo(hw_ep);
goto done;
}
@@ -1109,10 +1183,13 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_TXPKTRDY;
+ /* disable ping token in status phase */
+ csr |= MUSB_CSR0_H_DIS_PING;
+
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
- DBG(5, "ep0 STATUS, csr %04x\n", csr);
+ dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
}
musb_writew(epio, MUSB_CSR0, csr);
@@ -1150,64 +1227,71 @@ void musb_host_tx(struct musb *musb, u8 epnum)
int pipe;
bool done = false;
u16 tx_csr;
- size_t wLength = 0;
- u8 *buf = NULL;
- struct urb *urb;
+ size_t length = 0;
+ size_t offset = 0;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
- struct musb_qh *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
- : hw_ep->out_qh;
+ struct musb_qh *qh = hw_ep->out_qh;
+ struct urb *urb = next_urb(qh);
u32 status = 0;
void __iomem *mbase = musb->mregs;
struct dma_channel *dma;
-
- urb = next_urb(qh);
+ bool transfer_pending = false;
musb_ep_select(mbase, epnum);
tx_csr = musb_readw(epio, MUSB_TXCSR);
/* with CPPI, DMA sometimes triggers "extra" irqs */
if (!urb) {
- DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
- goto finish;
+ dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ return;
}
pipe = urb->pipe;
dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
- DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+ dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
dma ? ", dma" : "");
/* check for errors */
if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
/* dma was disabled, fifo flushed */
- DBG(3, "TX end %d stall\n", epnum);
+ dev_dbg(musb->controller, "TX end %d stall\n", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
/* (NON-ISO) dma was disabled, fifo flushed */
- DBG(3, "TX 3strikes on ep=%d\n", epnum);
+ dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
status = -ETIMEDOUT;
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
- DBG(6, "TX end=%d device not responding\n", epnum);
-
- /* NOTE: this code path would be a good place to PAUSE a
- * transfer, if there's some other (nonperiodic) tx urb
- * that could use this fifo. (dma complicates it...)
- *
- * if (bulk && qh->ring.next != &musb->out_bulk), then
- * we have a candidate... NAKing is *NOT* an error
- */
- musb_ep_select(mbase, epnum);
- musb_writew(epio, MUSB_TXCSR,
- MUSB_TXCSR_H_WZC_BITS
- | MUSB_TXCSR_TXPKTRDY);
- goto finish;
+ if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
+ && !list_is_singular(&musb->out_bulk)) {
+ dev_dbg(musb->controller,
+ "NAK timeout on TX%d ep\n", epnum);
+ musb_bulk_nak_timeout(musb, hw_ep, 0);
+ } else {
+ dev_dbg(musb->controller,
+ "TX end=%d device not responding\n", epnum);
+ /* NOTE: this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) tx urb
+ * that could use this fifo. (dma complicates it...)
+ * That's already done for bulk RX transfers.
+ *
+ * if (bulk && qh->ring.next != &musb->out_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS
+ | MUSB_TXCSR_TXPKTRDY);
+ }
+ return;
}
+done:
if (status) {
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
@@ -1236,32 +1320,92 @@ void musb_host_tx(struct musb *musb, u8 epnum)
/* second cppi case */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
- goto finish;
+ dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ return;
+ }
+ if (is_dma_capable() && dma && !status) {
+ /*
+ * DMA has completed. But if we're using DMA mode 1 (multi
+ * packet DMA), we need a terminal TXPKTRDY interrupt before
+ * we can consider this transfer completed, lest we trash
+ * its last packet when writing the next URB's data. So we
+ * switch back to mode 0 to get that interrupt; we'll come
+ * back here once it happens.
+ */
+ if (tx_csr & MUSB_TXCSR_DMAMODE) {
+ /*
+ * We shouldn't clear DMAMODE with DMAENAB set; so
+ * clear them in a safe order. That should be OK
+ * once TXPKTRDY has been set (and I've never seen
+ * it being 0 at this moment -- DMA interrupt latency
+ * is significant) but if it hasn't been then we have
+ * no choice but to stop being polite and ignore the
+ * programmer's guide... :-)
+ *
+ * Note that we must write TXCSR with TXPKTRDY cleared
+ * in order not to re-trigger the packet send (this bit
+ * can't be cleared by CPU), and there's another caveat:
+ * TXPKTRDY may be set shortly and then cleared in the
+ * double-buffered FIFO mode, so we do an extra TXCSR
+ * read for debouncing...
+ */
+ tx_csr &= musb_readw(epio, MUSB_TXCSR);
+ if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
+ tx_csr &= ~(MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR,
+ tx_csr | MUSB_TXCSR_H_WZC_BITS);
+ }
+ tx_csr &= ~(MUSB_TXCSR_DMAMODE |
+ MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR,
+ tx_csr | MUSB_TXCSR_H_WZC_BITS);
+
+ /*
+ * There is no guarantee that we'll get an interrupt
+ * after clearing DMAMODE as we might have done this
+ * too late (after TXPKTRDY was cleared by controller).
+ * Re-read TXCSR as we have spoiled its previous value.
+ */
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+ }
+
+ /*
+ * We may get here from a DMA completion or TXPKTRDY interrupt.
+ * In any case, we must check the FIFO status here and bail out
+ * only if the FIFO still has data -- that should prevent the
+ * "missed" TXPKTRDY interrupts and deal with double-buffered
+ * FIFO mode too...
+ */
+ if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
+ dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
+ "CSR %04x\n", tx_csr);
+ return;
+ }
}
- /* REVISIT this looks wrong... */
if (!status || dma || usb_pipeisoc(pipe)) {
if (dma)
- wLength = dma->actual_len;
+ length = dma->actual_len;
else
- wLength = qh->segsize;
- qh->offset += wLength;
+ length = qh->segsize;
+ qh->offset += length;
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
- d->actual_length = qh->segsize;
+ d->actual_length = length;
+ d->status = status;
if (++qh->iso_idx >= urb->number_of_packets) {
done = true;
} else {
d++;
- buf = urb->transfer_buffer + d->offset;
- wLength = d->length;
+ offset = d->offset;
+ length = d->length;
}
- } else if (dma) {
+ } else if (dma && urb->transfer_buffer_length == qh->offset) {
done = true;
} else {
/* see if we need to send more data, or ZLP */
@@ -1272,10 +1416,9 @@ void musb_host_tx(struct musb *musb, u8 epnum)
& URB_ZERO_PACKET))
done = true;
if (!done) {
- buf = urb->transfer_buffer
- + qh->offset;
- wLength = urb->transfer_buffer_length
- - qh->offset;
+ offset = qh->offset;
+ length = urb->transfer_buffer_length - offset;
+ transfer_pending = true;
}
}
}
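
The block above switches a mode-1 TX DMA channel back to mode 0 so the terminal TXPKTRDY interrupt can be seen; the key constraint is that DMAENAB must be cleared in an earlier write than DMAMODE, with TXPKTRDY written back as 0 so the packet is not re-triggered. A minimal sketch of that write ordering against a fake 16-bit CSR (bit values and the write helper are stand-ins, and the driver's write-zero-to-clear handling and TXPKTRDY debounce are omitted):

#include <stdint.h>
#include <stdio.h>

/* Stand-in bit definitions -- values are illustrative only */
#define TXCSR_TXPKTRDY 0x0001
#define TXCSR_DMAMODE  0x0004
#define TXCSR_DMAENAB  0x0010

static uint16_t fake_txcsr = TXCSR_DMAENAB | TXCSR_DMAMODE | TXCSR_TXPKTRDY;

static void write_txcsr(uint16_t val)
{
	fake_txcsr = val;	/* a real driver would write the register here */
	printf("TXCSR <= %04x\n", val);
}

/*
 * Sketch of the two-step switch from DMA mode 1 back to mode 0:
 * first drop DMAENAB (keeping TXPKTRDY written as 0 so the packet is
 * not resent), only then drop DMAMODE in a second write.
 */
static void switch_to_dma_mode0(void)
{
	uint16_t csr = fake_txcsr;

	csr &= ~(TXCSR_DMAENAB | TXCSR_TXPKTRDY);
	write_txcsr(csr);			/* step 1: DMAENAB off */

	csr &= ~(TXCSR_DMAMODE | TXCSR_TXPKTRDY);
	write_txcsr(csr);			/* step 2: DMAMODE off */
}

int main(void)
{
	switch_to_dma_mode0();
	return 0;
}
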
@@ -1294,28 +1437,65 @@ void musb_host_tx(struct musb *musb, u8 epnum)
urb->status = status;
urb->actual_length = qh->offset;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
+ return;
+ } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
+ if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
+ offset, length)) {
+ if (is_cppi_enabled() || tusb_dma_omap())
+ musb_h_tx_dma_start(hw_ep);
+ return;
+ }
+ } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
+ dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
+ return;
+ }
- } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
- /* WARN_ON(!buf); */
+ /*
+ * PIO: start next packet in this URB.
+ *
+ * REVISIT: some docs say that when hw_ep->tx_double_buffered,
+ * (and presumably, FIFO is not half-full) we should write *two*
+ * packets before updating TXCSR; other docs disagree...
+ */
+ if (length > qh->maxpacket)
+ length = qh->maxpacket;
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
+
+ /*
+ * We need to map sg if the transfer_buffer is
+ * NULL.
+ */
+ if (!urb->transfer_buffer)
+ qh->use_sg = true;
+
+ if (qh->use_sg) {
+ /* sg_miter_start is already done in musb_ep_program */
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller, "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ status = -EINVAL;
+ goto done;
+ }
+ urb->transfer_buffer = qh->sg_miter.addr;
+ length = min_t(u32, length, qh->sg_miter.length);
+ musb_write_fifo(hw_ep, length, urb->transfer_buffer);
+ qh->sg_miter.consumed = length;
+ sg_miter_stop(&qh->sg_miter);
+ } else {
+ musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
+ }
- /* REVISIT: some docs say that when hw_ep->tx_double_buffered,
- * (and presumably, fifo is not half-full) we should write TWO
- * packets before updating TXCSR ... other docs disagree ...
- */
- /* PIO: start next packet in this URB */
- if (wLength > qh->maxpacket)
- wLength = qh->maxpacket;
- musb_write_fifo(hw_ep, wLength, buf);
- qh->segsize = wLength;
+ qh->segsize = length;
- musb_ep_select(mbase, epnum);
- musb_writew(epio, MUSB_TXCSR,
- MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
- } else
- DBG(1, "not complete, but dma enabled?\n");
+ if (qh->use_sg) {
+ if (offset + length >= urb->transfer_buffer_length)
+ qh->use_sg = false;
+ }
-finish:
- return;
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
@@ -1376,6 +1556,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
bool done = false;
u32 status;
struct dma_channel *dma;
+ unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
musb_ep_select(mbase, epnum);
@@ -1392,7 +1573,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
* usbtest #11 (unlinks) triggers it regularly, sometimes
* with fifo full. (Only with DMA??)
*/
- DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
+ dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
musb_readw(epio, MUSB_RXCOUNT));
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
return;
@@ -1400,20 +1581,20 @@ void musb_host_rx(struct musb *musb, u8 epnum)
pipe = urb->pipe;
- DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
+ dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
epnum, rx_csr, urb->actual_length,
dma ? dma->actual_len : 0);
/* check for errors, concurrent stall & unlink is not really
* handled yet! */
if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
- DBG(3, "RX end %d STALL\n", epnum);
+ dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
- DBG(3, "end %d RX proto error\n", epnum);
+ dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
status = -EPROTO;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
@@ -1421,25 +1602,37 @@ void musb_host_rx(struct musb *musb, u8 epnum)
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
- /* NOTE this code path would be a good place to PAUSE a
- * transfer, if there's some other (nonperiodic) rx urb
- * that could use this fifo. (dma complicates it...)
+ dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
+
+ /* NOTE: NAKing is *NOT* an error, so we want to
+ * continue. Except ... if there's a request for
+ * another QH, use that instead of starving it.
*
- * if (bulk && qh->ring.next != &musb->in_bulk), then
- * we have a candidate... NAKing is *NOT* an error
+ * Devices like Ethernet and serial adapters keep
+ * reads posted at all times, which will starve
+ * other devices without this logic.
*/
- DBG(6, "RX end %d NAK timeout\n", epnum);
+ if (usb_pipebulk(urb->pipe)
+ && qh->mux == 1
+ && !list_is_singular(&musb->in_bulk)) {
+ musb_bulk_nak_timeout(musb, hw_ep, 1);
+ return;
+ }
musb_ep_select(mbase, epnum);
- musb_writew(epio, MUSB_RXCSR,
- MUSB_RXCSR_H_WZC_BITS
- | MUSB_RXCSR_H_REQPKT);
+ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
goto finish;
} else {
- DBG(4, "RX end %d ISO data error\n", epnum);
+ dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
/* packet error reported later */
iso_err = true;
}
+ } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
+ dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
+ epnum);
+ status = -EPROTO;
}
/* faults abort the transfer */
@@ -1469,7 +1662,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
-#ifndef CONFIG_USB_INVENTRA_DMA
+#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
if (rx_csr & MUSB_RXCSR_H_REQPKT) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
@@ -1483,7 +1676,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
done = true;
}
- DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+ dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
xfer_len, dma ? ", dma" : "");
rx_csr &= ~MUSB_RXCSR_H_REQPKT;
@@ -1501,7 +1694,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
@@ -1511,13 +1705,33 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* even if there was an error, we did the dma
* for iso_frame_desc->length
*/
- if (d->status != EILSEQ && d->status != -EOVERFLOW)
+ if (d->status != -EILSEQ && d->status != -EOVERFLOW)
d->status = 0;
- if (++qh->iso_idx >= urb->number_of_packets)
+ if (++qh->iso_idx >= urb->number_of_packets) {
done = true;
- else
+ } else {
+#if defined(CONFIG_USB_TI_CPPI41_DMA)
+ struct dma_controller *c;
+ dma_addr_t *buf;
+ u32 length, ret;
+
+ c = musb->dma_controller;
+ buf = (void *)
+ urb->iso_frame_desc[qh->iso_idx].offset
+ + (u32)urb->transfer_dma;
+
+ length =
+ urb->iso_frame_desc[qh->iso_idx].length;
+
+ val |= MUSB_RXCSR_DMAENAB;
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+ ret = c->channel_program(dma, qh->maxpacket,
+ 0, (u32) buf, length);
+#endif
done = false;
+ }
} else {
/* done if urb buffer is full or short packet is recd */
@@ -1533,7 +1747,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
MUSB_RXCSR_H_WZC_BITS | val);
}
- DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
+ dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
done ? "off" : "reset",
musb_readw(epio, MUSB_RXCSR),
musb_readw(epio, MUSB_RXCOUNT));
@@ -1557,7 +1771,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
}
/* we are expecting IN packets */
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
if (dma) {
struct dma_controller *c;
u16 rx_count;
@@ -1566,37 +1781,37 @@ void musb_host_rx(struct musb *musb, u8 epnum)
rx_count = musb_readw(epio, MUSB_RXCOUNT);
- DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
+ dev_dbg(musb->controller, "RX%d count %d, buffer 0x%llx len %d/%d\n",
epnum, rx_count,
- urb->transfer_dma
- + urb->actual_length,
+ (unsigned long long) urb->transfer_dma
+ + urb->actual_length,
qh->offset,
urb->transfer_buffer_length);
c = musb->dma_controller;
if (usb_pipeisoc(pipe)) {
- int status = 0;
+ int d_status = 0;
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
if (iso_err) {
- status = -EILSEQ;
+ d_status = -EILSEQ;
urb->error_count++;
}
if (rx_count > d->length) {
- if (status == 0) {
- status = -EOVERFLOW;
+ if (d_status == 0) {
+ d_status = -EOVERFLOW;
urb->error_count++;
}
- DBG(2, "** OVERFLOW %d into %d\n",\
+ dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
rx_count, d->length);
length = d->length;
} else
length = rx_count;
- d->status = status;
+ d->status = d_status;
buf = urb->transfer_dma + d->offset;
} else {
length = rx_count;
@@ -1617,7 +1832,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
dma->desired_mode = 1;
if (rx_count < hw_ep->max_packet_sz_rx) {
length = rx_count;
- dma->bDesiredMode = 0;
+ dma->desired_mode = 0;
} else {
length = urb->transfer_buffer_length;
}
@@ -1647,7 +1862,11 @@ void musb_host_rx(struct musb *musb, u8 epnum)
val &= ~MUSB_RXCSR_H_AUTOREQ;
else
val |= MUSB_RXCSR_H_AUTOREQ;
- val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+ val |= MUSB_RXCSR_DMAENAB;
+
+ /* autoclear shouldn't be set in high bandwidth */
+ if (qh->hb_mult == 1)
+ val |= MUSB_RXCSR_AUTOCLEAR;
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | val);
@@ -1664,15 +1883,54 @@ void musb_host_rx(struct musb *musb, u8 epnum)
c->channel_release(dma);
hw_ep->rx_channel = NULL;
dma = NULL;
- /* REVISIT reset CSR */
+ val = musb_readw(epio, MUSB_RXCSR);
+ val &= ~(MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR);
+ musb_writew(epio, MUSB_RXCSR, val);
}
}
#endif /* Mentor DMA */
if (!dma) {
- done = musb_host_packet_rx(musb, urb,
- epnum, iso_err);
- DBG(6, "read %spacket\n", done ? "last " : "");
+ unsigned int received_len;
+
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
+
+ /*
+ * We need to map sg if the transfer_buffer is
+ * NULL.
+ */
+ if (!urb->transfer_buffer) {
+ qh->use_sg = true;
+ sg_miter_start(&qh->sg_miter, urb->sg, 1,
+ sg_flags);
+ }
+
+ if (qh->use_sg) {
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller, "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ status = -EINVAL;
+ done = true;
+ goto finish;
+ }
+ urb->transfer_buffer = qh->sg_miter.addr;
+ received_len = urb->actual_length;
+ qh->offset = 0x0;
+ done = musb_host_packet_rx(musb, urb, epnum,
+ iso_err);
+ /* Calculate the number of bytes received */
+ received_len = urb->actual_length -
+ received_len;
+ qh->sg_miter.consumed = received_len;
+ sg_miter_stop(&qh->sg_miter);
+ } else {
+ done = musb_host_packet_rx(musb, urb,
+ epnum, iso_err);
+ }
+ dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
}
}
@@ -1680,6 +1938,9 @@ finish:
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (done) {
+ if (qh->use_sg)
+ qh->use_sg = false;
+
if (urb->status == -EINPROGRESS)
urb->status = status;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
@@ -1701,6 +1962,9 @@ static int musb_schedule(
int best_end, epnum;
struct musb_hw_ep *hw_ep = NULL;
struct list_head *head = NULL;
+ u8 toggle;
+ u8 txtype;
+ struct urb *urb = next_urb(qh);
/* use fixed hardware for control and bulk */
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
@@ -1711,40 +1975,55 @@ static int musb_schedule(
/* else, periodic transfers get muxed to other endpoints */
- /* FIXME this doesn't consider direction, so it can only
- * work for one half of the endpoint hardware, and assumes
- * the previous cases handled all non-shared endpoints...
- */
-
- /* we know this qh hasn't been scheduled, so all we need to do
+ /*
+ * We know this qh hasn't been scheduled, so all we need to do
* is choose which hardware endpoint to put it on ...
*
* REVISIT what we really want here is a regular schedule tree
- * like e.g. OHCI uses, but for now musb->periodic is just an
- * array of the _single_ logical endpoint associated with a
- * given physical one (identity mapping logical->physical).
- *
- * that simplistic approach makes TT scheduling a lot simpler;
- * there is none, and thus none of its complexity...
+ * like e.g. OHCI uses.
*/
best_diff = 4096;
best_end = -1;
- for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+ for (epnum = 1, hw_ep = musb->endpoints + 1;
+ epnum < musb->nr_endpoints;
+ epnum++, hw_ep++) {
int diff;
- if (musb->periodic[epnum])
+ if (musb_ep_get_qh(hw_ep, is_in) != NULL)
continue;
- hw_ep = &musb->endpoints[epnum];
+
if (hw_ep == musb->bulk_ep)
continue;
if (is_in)
- diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+ diff = hw_ep->max_packet_sz_rx;
else
- diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+ diff = hw_ep->max_packet_sz_tx;
+ diff -= (qh->maxpacket * qh->hb_mult);
if (diff >= 0 && best_diff > diff) {
+
+ /*
+ * Mentor controller has a bug in that if we schedule
+ * a BULK Tx transfer on an endpoint that had earlier
+ * handled ISOC then the BULK transfer has to start on
+ * a zero toggle. If the BULK transfer starts on a 1
+ * toggle then this transfer will fail as the mentor
+ * controller starts the Bulk transfer on a 0 toggle
+ * irrespective of the programming of the toggle bits
+ * in the TXCSR register. Check for this condition
+ * while allocating the EP for a Tx Bulk transfer. If
+ * so skip this EP.
+ */
+ hw_ep = musb->endpoints + epnum;
+ toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
+ txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
+ >> 4) & 0x3;
+ if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
+ toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
+ continue;
+
best_diff = diff;
best_end = epnum;
}
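
The scheduling loop above skips a candidate TX endpoint when a bulk transfer would have to start on DATA1 but the endpoint's TXTYPE still carries the ISOC protocol bits from an earlier transfer, since the controller would force DATA0 regardless. A standalone sketch of that check, with names and protocol encodings as stand-ins for the driver's register fields:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in protocol encodings for the TXTYPE "protocol" field */
#define PROTO_ISOC 1
#define PROTO_BULK 2

/*
 * Sketch: a TX endpoint that last ran ISOC cannot be reused for a bulk
 * transfer whose saved data toggle is 1 -- the controller would start
 * on DATA0 anyway, so the scheduler skips such endpoints.
 */
static bool ep_usable_for_bulk_tx(int last_proto, int saved_toggle)
{
	return !(saved_toggle == 1 && last_proto == PROTO_ISOC);
}

int main(void)
{
	printf("ISOC history, toggle 1: %s\n",
	       ep_usable_for_bulk_tx(PROTO_ISOC, 1) ? "usable" : "skip");
	printf("BULK history, toggle 1: %s\n",
	       ep_usable_for_bulk_tx(PROTO_BULK, 1) ? "usable" : "skip");
	printf("ISOC history, toggle 0: %s\n",
	       ep_usable_for_bulk_tx(PROTO_ISOC, 0) ? "usable" : "skip");
	return 0;
}
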
@@ -1756,6 +2035,17 @@ static int musb_schedule(
head = &musb->in_bulk;
else
head = &musb->out_bulk;
+
+ /* Enable the bulk RX/TX NAK timeout scheme when bulk requests are
+ * multiplexed. This scheme does not work in the high speed to full
+ * speed scenario, as NAK interrupts do not come from a
+ * full speed device connected to a high speed device.
+ * The NAK timeout interval is 8 (128 uframes or 16 ms) for HS and
+ * 4 (8 frames or 8 ms) for FS devices.
+ */
+ if (qh->dev)
+ qh->intv_reg =
+ (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
goto success;
} else if (best_end < 0) {
return -ENOSPC;
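
The intv_reg values chosen in the hunk above map onto the hardware's NAK limit of 2^(m-1) (micro)frames, which is where the 16 ms and 8 ms figures in the comment come from. A quick check of that arithmetic; the helper is purely illustrative:

#include <stdio.h>

/* A NAK limit register value m selects a timeout of 2^(m-1) (micro)frames */
static double nak_timeout_ms(unsigned m, double frame_ms)
{
	return (double)(1u << (m - 1)) * frame_ms;
}

int main(void)
{
	/* High speed: 125 us microframes, intv_reg = 8 */
	printf("HS: %.0f uframes = %.1f ms\n",
	       (double)(1u << 7), nak_timeout_ms(8, 0.125));
	/* Full speed: 1 ms frames, intv_reg = 4 */
	printf("FS: %.0f frames  = %.1f ms\n",
	       (double)(1u << 3), nak_timeout_ms(4, 1.0));
	return 0;
}
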
@@ -1764,8 +2054,7 @@ static int musb_schedule(
idle = 1;
qh->mux = 0;
hw_ep = musb->endpoints + best_end;
- musb->periodic[best_end] = qh;
- DBG(4, "qh %p periodic slot %d\n", qh, best_end);
+ dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
if (head) {
idle = list_empty(head);
@@ -1787,7 +2076,7 @@ static int musb_urb_enqueue(
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
struct usb_host_endpoint *hep = urb->ep;
- struct musb_qh *qh = hep->hcpriv;
+ struct musb_qh *qh;
struct usb_endpoint_descriptor *epd = &hep->desc;
int ret;
unsigned type_reg;
@@ -1799,22 +2088,21 @@ static int musb_urb_enqueue(
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ qh = ret ? NULL : hep->hcpriv;
+ if (qh)
+ urb->hcpriv = qh;
spin_unlock_irqrestore(&musb->lock, flags);
- if (ret)
- return ret;
/* DMA mapping was already done, if needed, and this urb is on
- * hep->urb_list ... so there's little to do unless hep wasn't
- * yet scheduled onto a live qh.
+ * hep->urb_list now ... so we're done, unless hep wasn't yet
+ * scheduled onto a live qh.
*
* REVISIT best to keep hep->hcpriv valid until the endpoint gets
* disabled, testing for empty qh->ring and avoiding qh setup costs
* except for the first urb queued after a config change.
*/
- if (qh) {
- urb->hcpriv = qh;
- return 0;
- }
+ if (qh || ret)
+ return ret;
/* Allocate and initialize qh, minimizing the work done each time
* hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
@@ -1835,16 +2123,28 @@ static int musb_urb_enqueue(
INIT_LIST_HEAD(&qh->ring);
qh->is_ready = 1;
- qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+ qh->maxpacket = usb_endpoint_maxp(epd);
+ qh->type = usb_endpoint_type(epd);
- /* no high bandwidth support yet */
- if (qh->maxpacket & ~0x7ff) {
- ret = -EMSGSIZE;
- goto done;
+ /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
+ * Some musb cores don't support high bandwidth ISO transfers; and
+ * we don't (yet!) support high bandwidth interrupt transfers.
+ */
+ qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+ if (qh->hb_mult > 1) {
+ int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
+
+ if (ok)
+ ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
+ || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
+ if (!ok) {
+ ret = -EMSGSIZE;
+ goto done;
+ }
+ qh->maxpacket &= 0x7ff;
}
qh->epnum = usb_endpoint_num(epd);
- qh->type = usb_endpoint_type(epd);
/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
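
The high-bandwidth handling above splits wMaxPacketSize into a base packet size (bits 10:0) and a transactions-per-microframe multiplier (bits 12:11, stored as hb_mult = field + 1). A small worked example of that decode; the sample values are illustrative:

#include <stdint.h>
#include <stdio.h>

static void decode_maxp(uint16_t wMaxPacketSize)
{
	unsigned hb_mult = 1 + ((wMaxPacketSize >> 11) & 0x03);
	unsigned maxp = wMaxPacketSize & 0x7ff;

	printf("wMaxPacketSize 0x%04x -> %u x %u bytes per microframe\n",
	       wMaxPacketSize, hb_mult, maxp);
}

int main(void)
{
	decode_maxp(0x0200);	/* plain 512-byte bulk/ISO endpoint   */
	decode_maxp(0x1400);	/* high-bandwidth ISO: 3 x 1024 bytes */
	return 0;
}
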
@@ -1888,13 +2188,11 @@ static int musb_urb_enqueue(
*
* The downside of disabling this is that transfer scheduling
* gets VERY unfair for nonperiodic transfers; a misbehaving
- * peripheral could make that hurt. Or for reads, one that's
- * perfectly normal: network and other drivers keep reads
- * posted at all times, having one pending for a week should
- * be perfectly safe.
+ * peripheral could make that hurt. That's perfectly normal
+ * for reads from network or serial adapters ... so we have
+ * partial NAKlimit support for bulk RX.
*
- * The upside of disabling it is avoidng transfer scheduling
- * code to put this aside for while.
+ * The upside of disabling it is simpler transfer scheduling.
*/
interval = 0;
}
@@ -1924,11 +2222,12 @@ static int musb_urb_enqueue(
* we only have work to do in the former case.
*/
spin_lock_irqsave(&musb->lock, flags);
- if (hep->hcpriv) {
+ if (hep->hcpriv || !next_urb(qh)) {
/* some concurrent activity submitted another urb to hep...
* odd, rare, error prone, but legal.
*/
kfree(qh);
+ qh = NULL;
ret = 0;
} else
ret = musb_schedule(musb, qh,
@@ -1958,14 +2257,16 @@ done:
* called with controller locked, irqs blocked
* that hardware queue advances to the next transfer, unless prevented
*/
-static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
struct musb_hw_ep *ep = qh->hw_ep;
+ struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
unsigned hw_end = ep->epnum;
void __iomem *regs = ep->musb->mregs;
- u16 csr;
+ int is_in = usb_pipein(urb->pipe);
int status = 0;
+ u16 csr;
musb_ep_select(regs, hw_end);
@@ -1975,7 +2276,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
dma = is_in ? ep->rx_channel : ep->tx_channel;
if (dma) {
status = ep->musb->dma_controller->channel_abort(dma);
- DBG(status ? 1 : 3,
+ dev_dbg(musb->controller,
"abort %cX%d DMA for urb %p --> %d\n",
is_in ? 'R' : 'T', ep->epnum,
urb, status);
@@ -1984,7 +2285,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
}
/* turn off DMA requests, discard state, stop polling ... */
- if (is_in) {
+ if (ep->epnum && is_in) {
/* giveback saves bulk toggle */
csr = musb_h_flush_rxfifo(ep, 0);
@@ -1992,7 +2293,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
* endpoint's irq status here to avoid bogus irqs.
* clearing that status is platform-specific...
*/
- } else {
+ } else if (ep->epnum) {
musb_h_tx_flush_fifo(ep);
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~(MUSB_TXCSR_AUTOSET
@@ -2006,6 +2307,8 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
musb_writew(epio, MUSB_TXCSR, csr);
/* flush cpu writebuffer */
csr = musb_readw(epio, MUSB_TXCSR);
+ } else {
+ musb_h_ep0_flush_fifo(ep);
}
if (status == 0)
musb_advance_schedule(ep->musb, urb, ep, is_in);
@@ -2016,14 +2319,14 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct musb *musb = hcd_to_musb(hcd);
struct musb_qh *qh;
- struct list_head *sched;
unsigned long flags;
+ int is_in = usb_pipein(urb->pipe);
int ret;
- DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
+ dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out");
+ is_in ? "in" : "out");
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
@@ -2034,47 +2337,25 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (!qh)
goto done;
- /* Any URB not actively programmed into endpoint hardware can be
+ /*
+ * Any URB not actively programmed into endpoint hardware can be
* immediately given back; that's any URB not at the head of an
* endpoint queue, unless someday we get real DMA queues. And even
* if it's at the head, it might not be known to the hardware...
*
- * Otherwise abort current transfer, pending dma, etc.; urb->status
+ * Otherwise abort current transfer, pending DMA, etc.; urb->status
* has already been updated. This is a synchronous abort; it'd be
* OK to hold off until after some IRQ, though.
+ *
+ * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
*/
- if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
- ret = -EINPROGRESS;
- else {
- switch (qh->type) {
- case USB_ENDPOINT_XFER_CONTROL:
- sched = &musb->control;
- break;
- case USB_ENDPOINT_XFER_BULK:
- if (qh->mux == 1) {
- if (usb_pipein(urb->pipe))
- sched = &musb->in_bulk;
- else
- sched = &musb->out_bulk;
- break;
- }
- default:
- /* REVISIT when we get a schedule tree, periodic
- * transfers won't always be at the head of a
- * singleton queue...
- */
- sched = NULL;
- break;
- }
- }
-
- /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
- if (ret < 0 || (sched && qh != first_qh(sched))) {
+ if (!qh->is_ready
+ || urb->urb_list.prev != &qh->hep->urb_list
+ || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
int ready = qh->is_ready;
- ret = 0;
qh->is_ready = 0;
- __musb_giveback(musb, urb, 0);
+ musb_giveback(musb, urb, 0);
qh->is_ready = ready;
/* If nothing else (usually musb_giveback) is using it
@@ -2086,7 +2367,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
kfree(qh);
}
} else
- ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+ ret = musb_cleanup_urb(urb, qh);
done:
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
@@ -2096,13 +2377,11 @@ done:
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
- u8 epnum = hep->desc.bEndpointAddress;
+ u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
- u8 is_in = epnum & USB_DIR_IN;
struct musb_qh *qh;
struct urb *urb;
- struct list_head *sched;
spin_lock_irqsave(&musb->lock, flags);
@@ -2110,31 +2389,11 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
if (qh == NULL)
goto exit;
- switch (qh->type) {
- case USB_ENDPOINT_XFER_CONTROL:
- sched = &musb->control;
- break;
- case USB_ENDPOINT_XFER_BULK:
- if (qh->mux == 1) {
- if (is_in)
- sched = &musb->in_bulk;
- else
- sched = &musb->out_bulk;
- break;
- }
- default:
- /* REVISIT when we get a schedule tree, periodic transfers
- * won't always be at the head of a singleton queue...
- */
- sched = NULL;
- break;
- }
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
- /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
-
- /* kick first urb off the hardware, if needed */
+ /* Kick the first URB off the hardware, if needed */
qh->is_ready = 0;
- if (!sched || qh == first_qh(sched)) {
+ if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
urb = next_urb(qh);
/* make software (then hardware) stop ASAP */
@@ -2142,7 +2401,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
urb->status = -ESHUTDOWN;
/* cleanup */
- musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+ musb_cleanup_urb(urb, qh);
/* Then nuke all the others ... and advance the
* queue on hw_ep (e.g. bulk ring) when we're done.
@@ -2158,7 +2417,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
* will activate any of these as it advances.
*/
while (!list_empty(&hep->urb_list))
- __musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+ musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
hep->hcpriv = NULL;
list_del(&qh->ring);
@@ -2196,13 +2455,32 @@ static void musb_h_stop(struct usb_hcd *hcd)
static int musb_bus_suspend(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
+ u8 devctl;
+
+ musb_port_suspend(musb, true);
- if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+ if (!is_host_active(musb))
return 0;
- if (is_host_active(musb) && musb->is_active) {
- WARNING("trying to suspend as %s is_active=%i\n",
- otg_state_string(musb), musb->is_active);
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_SUSPEND:
+ return 0;
+ case OTG_STATE_A_WAIT_VRISE:
+ /* ID could be grounded even if there's no device
+ * on the other end of the cable. NOTE that the
+ * A_WAIT_VRISE timers are messy with MUSB...
+ */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ break;
+ default:
+ break;
+ }
+
+ if (musb->is_active) {
+ WARNING("trying to suspend as %s while active\n",
+ usb_otg_state_string(musb->xceiv->state));
return -EBUSY;
} else
return 0;
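
The VBUS check in the A_WAIT_VRISE case reads the two-bit VBus level field out of DEVCTL; only its maximum value (VBus above the VBus Valid threshold) justifies moving the OTG state on to A_WAIT_BCON. The same test in isolation, as a sketch with an illustrative helper name:

/* Sketch of the DEVCTL VBus test used in musb_bus_suspend() above. */
static bool sketch_vbus_above_valid(struct musb *musb)
{
	u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	/* MUSB_DEVCTL_VBUS masks the two-bit level field; all bits set
	 * is the highest level reported by the VBus comparators.
	 */
	return (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS;
}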
@@ -2210,14 +2488,130 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
static int musb_bus_resume(struct usb_hcd *hcd)
{
- /* resuming child port does the work */
+ struct musb *musb = hcd_to_musb(hcd);
+
+ if (musb->config &&
+ musb->config->host_port_deassert_reset_at_resume)
+ musb_port_reset(musb, false);
+
+ return 0;
+}
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+
+#define MUSB_USB_DMA_ALIGN 4
+
+struct musb_temp_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[0];
+};
+
+static void musb_free_temp_buffer(struct urb *urb)
+{
+ enum dma_data_direction dir;
+ struct musb_temp_buffer *temp;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
+ data);
+
+ if (dir == DMA_FROM_DEVICE) {
+ memcpy(temp->old_xfer_buffer, temp->data,
+ urb->transfer_buffer_length);
+ }
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ enum dma_data_direction dir;
+ struct musb_temp_buffer *temp;
+ void *kmalloc_ptr;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg ||
+ urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
+ return 0;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+	/* Position our struct musb_temp_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
+
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (dir == DMA_TO_DEVICE)
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
return 0;
}
-const struct hc_driver musb_hc_driver = {
+static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ int ret;
+
+ /*
+ * The DMA engine in RTL1.8 and above cannot handle
+ * DMA addresses that are not aligned to a 4 byte boundary.
+	 * For such engines we implement the (un)map_urb_for_dma hooks.
+ * Do not use these hooks for RTL<1.8
+ */
+ if (musb->hwvers < MUSB_HWVERS_1800)
+ return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+
+ ret = musb_alloc_temp_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ musb_free_temp_buffer(urb);
+
+ return ret;
+}
+
+static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+
+ /* Do not use this hook for RTL<1.8 (see description above) */
+ if (musb->hwvers < MUSB_HWVERS_1800)
+ return;
+
+ musb_free_temp_buffer(urb);
+}
+#endif /* !CONFIG_MUSB_PIO_ONLY */
+
+static const struct hc_driver musb_hc_driver = {
.description = "musb-hcd",
.product_desc = "MUSB HDRC host driver",
- .hcd_priv_size = sizeof(struct musb),
+ .hcd_priv_size = sizeof(struct musb *),
.flags = HCD_USB2 | HCD_MEMORY,
/* not using irq handler or reset hooks from usbcore, since
@@ -2233,6 +2627,11 @@ const struct hc_driver musb_hc_driver = {
.urb_dequeue = musb_urb_dequeue,
.endpoint_disable = musb_h_disable,
+#ifndef CONFIG_MUSB_PIO_ONLY
+ .map_urb_for_dma = musb_map_urb_for_dma,
+ .unmap_urb_for_dma = musb_unmap_urb_for_dma,
+#endif
+
.hub_status_data = musb_hub_status_data,
.hub_control = musb_hub_control,
.bus_suspend = musb_bus_suspend,
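
The map/unmap hooks registered above exist only because the RTL 1.8+ DMA engine masks the low two address bits; any transfer buffer that is not 4-byte aligned is bounced through a kmalloc'd musb_temp_buffer first. The decision itself reduces to a small predicate; the helper below is a sketch (name illustrative), reusing MUSB_USB_DMA_ALIGN from the code above and MUSB_HWVERS_1800 from musb_core.h:

/* Sketch: when musb_map_urb_for_dma() needs the bounce buffer.
 * Assumes <linux/types.h> and MUSB_HWVERS_1800 from musb_core.h.
 */
static bool sketch_needs_bounce(u16 hwvers, const void *xfer_buf, u32 len)
{
	if (hwvers < MUSB_HWVERS_1800)	/* older cores can DMA from any address */
		return false;
	if (!len)			/* nothing to transfer, nothing to bounce */
		return false;
	return ((uintptr_t)xfer_buf & (MUSB_USB_DMA_ALIGN - 1)) != 0;
}

Scatter-gather URBs (urb->sg) are left alone by musb_alloc_temp_buffer() above and go through the regular usb_hcd_map_urb_for_dma() path.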
@@ -2240,3 +2639,69 @@ const struct hc_driver musb_hc_driver = {
/* .start_port_reset = NULL, */
/* .hub_irq_enable = NULL, */
};
+
+int musb_host_alloc(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+
+ /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+ musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
+ if (!musb->hcd)
+ return -EINVAL;
+
+ *musb->hcd->hcd_priv = (unsigned long) musb;
+ musb->hcd->self.uses_pio_for_control = 1;
+ musb->hcd->uses_new_polling = 1;
+ musb->hcd->has_tt = 1;
+
+ return 0;
+}
+
+void musb_host_cleanup(struct musb *musb)
+{
+ if (musb->port_mode == MUSB_PORT_MODE_GADGET)
+ return;
+ usb_remove_hcd(musb->hcd);
+ musb->hcd = NULL;
+}
+
+void musb_host_free(struct musb *musb)
+{
+ usb_put_hcd(musb->hcd);
+}
+
+int musb_host_setup(struct musb *musb, int power_budget)
+{
+ int ret;
+ struct usb_hcd *hcd = musb->hcd;
+
+ MUSB_HST_MODE(musb);
+ musb->xceiv->otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+
+ otg_set_host(musb->xceiv->otg, &hcd->self);
+ hcd->self.otg_port = 1;
+ musb->xceiv->otg->host = &hcd->self;
+ hcd->power_budget = 2 * (power_budget ? : 250);
+
+ ret = usb_add_hcd(hcd, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+}
+
+void musb_host_resume_root_hub(struct musb *musb)
+{
+ usb_hcd_resume_root_hub(musb->hcd);
+}
+
+void musb_host_poke_root_hub(struct musb *musb)
+{
+ MUSB_HST_MODE(musb);
+ if (musb->hcd->status_urb)
+ usb_hcd_poll_rh_status(musb->hcd);
+ else
+ usb_hcd_resume_root_hub(musb->hcd);
+}
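
musb_host_alloc(), musb_host_setup(), musb_host_cleanup() and musb_host_free() give the core a complete hcd life cycle without touching hc_driver directly. A sketch of the expected pairing, roughly how the core would drive it (error handling trimmed; the function names below are illustrative, not the actual musb_core.c code):

static int sketch_host_bringup(struct musb *musb, int power_budget)
{
	int ret;

	ret = musb_host_alloc(musb);	/* usb_create_hcd(), hcd_priv -> musb */
	if (ret)
		return ret;

	ret = musb_host_setup(musb, power_budget); /* usb_add_hcd(), A-device state */
	if (ret)
		musb_host_free(musb);	/* usb_put_hcd() on failure */

	return ret;
}

static void sketch_host_teardown(struct musb *musb)
{
	musb_host_cleanup(musb);	/* usb_remove_hcd(), skipped for gadget-only ports */
	musb_host_free(musb);		/* drop the final hcd reference */
}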
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 0b7fbcd2196..7bbf01bf4bb 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -35,15 +35,7 @@
#ifndef _MUSB_HOST_H
#define _MUSB_HOST_H
-static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
-{
- return container_of((void *) musb, struct usb_hcd, hcd_priv);
-}
-
-static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
-{
- return (struct musb *) (hcd->hcd_priv);
-}
+#include <linux/scatterlist.h>
/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
struct musb_qh {
@@ -67,9 +59,12 @@ struct musb_qh {
u8 is_ready; /* safe to modify hw_ep */
u8 type; /* XFERTYPE_* */
u8 epnum;
+ u8 hb_mult; /* high bandwidth pkts per uf */
u16 maxpacket;
u16 frame; /* for periodic schedule */
unsigned iso_idx; /* in urb->iso_frame_desc[] */
+ struct sg_mapping_iter sg_miter; /* for highmem in PIO mode */
+ bool use_sg; /* to track urb using sglist */
};
/* map from control or bulk queue head to the first qh on that ring */
@@ -81,7 +76,58 @@ static inline struct musb_qh *first_qh(struct list_head *q)
}
+#if IS_ENABLED(CONFIG_USB_MUSB_HOST) || IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
+extern struct musb *hcd_to_musb(struct usb_hcd *);
+extern irqreturn_t musb_h_ep0_irq(struct musb *);
+extern int musb_host_alloc(struct musb *);
+extern int musb_host_setup(struct musb *, int);
+extern void musb_host_cleanup(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
+extern void musb_host_free(struct musb *);
extern void musb_root_disconnect(struct musb *musb);
+extern void musb_host_resume_root_hub(struct musb *musb);
+extern void musb_host_poke_root_hub(struct musb *musb);
+extern void musb_port_suspend(struct musb *musb, bool do_suspend);
+extern void musb_port_reset(struct musb *musb, bool do_reset);
+extern void musb_host_finish_resume(struct work_struct *work);
+#else
+static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+ return NULL;
+}
+
+static inline irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+ return 0;
+}
+
+static inline int musb_host_alloc(struct musb *musb)
+{
+ return 0;
+}
+
+static inline int musb_host_setup(struct musb *musb, int power_budget)
+{
+ return 0;
+}
+
+static inline void musb_host_cleanup(struct musb *musb) {}
+static inline void musb_host_free(struct musb *musb) {}
+static inline void musb_host_tx(struct musb *musb, u8 epnum) {}
+static inline void musb_host_rx(struct musb *musb, u8 epnum) {}
+static inline void musb_root_disconnect(struct musb *musb) {}
+static inline void musb_host_resume_root_hub(struct musb *musb) {}
+static inline void musb_host_poll_rh_status(struct musb *musb) {}
+static inline void musb_host_poke_root_hub(struct musb *musb) {}
+static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
+static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
+static inline void musb_host_finish_resume(struct work_struct *work) {}
+#endif
struct usb_hcd;
@@ -90,11 +136,8 @@ extern int musb_hub_control(struct usb_hcd *hcd,
u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
-extern const struct hc_driver musb_hc_driver;
-
static inline struct urb *next_urb(struct musb_qh *qh)
{
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
struct list_head *queue;
if (!qh)
@@ -103,9 +146,6 @@ static inline struct urb *next_urb(struct musb_qh *qh)
if (list_empty(queue))
return NULL;
return list_entry(queue->next, struct urb, urb_list);
-#else
- return NULL;
-#endif
}
#endif /* _MUSB_HOST_H */
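
The header now pairs each real declaration with an empty static inline stub, so the core can call the host-side helpers unconditionally and the gadget-only configuration still compiles and links. The same pattern in isolation, with made-up names used purely for illustration:

/* CONFIG_FEATURE_FOO, struct foo and foo_start() are illustrative only. */
struct foo;

#if IS_ENABLED(CONFIG_FEATURE_FOO)
extern int foo_start(struct foo *f);		/* real code in foo.c */
#else
static inline int foo_start(struct foo *f)	/* compiled out: no-op stub */
{
	return 0;
}
#endif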
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index b06e9ef00cf..eebeed78edd 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -37,25 +37,6 @@
#include <linux/io.h>
-#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
- && !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
- && !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN)
-static inline void readsl(const void __iomem *addr, void *buf, int len)
- { insl((unsigned long)addr, buf, len); }
-static inline void readsw(const void __iomem *addr, void *buf, int len)
- { insw((unsigned long)addr, buf, len); }
-static inline void readsb(const void __iomem *addr, void *buf, int len)
- { insb((unsigned long)addr, buf, len); }
-
-static inline void writesl(const void __iomem *addr, const void *buf, int len)
- { outsl((unsigned long)addr, buf, len); }
-static inline void writesw(const void __iomem *addr, const void *buf, int len)
- { outsw((unsigned long)addr, buf, len); }
-static inline void writesb(const void __iomem *addr, const void *buf, int len)
- { outsb((unsigned long)addr, buf, len); }
-
-#endif
-
#ifndef CONFIG_BLACKFIN
/* NOTE: these offsets are all in bytes */
@@ -74,7 +55,7 @@ static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
{ __raw_writel(data, addr + offset); }
-#ifdef CONFIG_USB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
/*
* TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
@@ -114,7 +95,7 @@ static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
{ __raw_writeb(data, addr + offset); }
-#endif /* CONFIG_USB_TUSB6010 */
+#endif /* CONFIG_USB_MUSB_TUSB6010 */
#else
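
The TUSB6010 limitation noted above (no 8-bit access, 16-bit minimum) is what the special musb_readb()/musb_writeb() variants in this #if branch work around: byte accesses are synthesized from aligned 16-bit accesses. A sketch of the read side, assuming the usual __raw_readw() accessor; treat it as an illustration rather than the exact helper in this header:

static inline u8 sketch_tusb_readb(const void __iomem *addr, unsigned offset)
{
	u16 tmp = __raw_readw(addr + (offset & ~1));	/* aligned 16-bit read */

	return (offset & 1) ? (tmp >> 8) : (tmp & 0xff);	/* pick the right byte */
}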
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index de3b2f18db4..03f2655af29 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -72,6 +72,14 @@
#define MUSB_DEVCTL_HR 0x02
#define MUSB_DEVCTL_SESSION 0x01
+/* MUSB ULPI VBUSCONTROL */
+#define MUSB_ULPI_USE_EXTVBUS 0x01
+#define MUSB_ULPI_USE_EXTVBUSIND 0x02
+/* ULPI_REG_CONTROL */
+#define MUSB_ULPI_REG_REQ (1 << 0)
+#define MUSB_ULPI_REG_CMPLT (1 << 1)
+#define MUSB_ULPI_RDN_WR (1 << 2)
+
/* TESTMODE */
#define MUSB_TEST_FORCE_HOST 0x80
#define MUSB_TEST_FIFO_ACCESS 0x40
@@ -226,7 +234,8 @@
#define MUSB_TESTMODE 0x0F /* 8 bit */
/* Get offset for a given FIFO from musb->mregs */
-#ifdef CONFIG_USB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
#else
#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
@@ -246,6 +255,13 @@
/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
#define MUSB_HWVERS 0x6C /* 8 bit */
+#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */
+#define MUSB_ULPI_INT_MASK 0x72 /* 8 bit */
+#define MUSB_ULPI_INT_SRC 0x73 /* 8 bit */
+#define MUSB_ULPI_REG_DATA 0x74 /* 8 bit */
+#define MUSB_ULPI_REG_ADDR 0x75 /* 8 bit */
+#define MUSB_ULPI_REG_CONTROL 0x76 /* 8 bit */
+#define MUSB_ULPI_RAW_DATA 0x77 /* 8 bit */
#define MUSB_EPINFO 0x78 /* 8 bit */
#define MUSB_RAMINFO 0x79 /* 8 bit */
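
The new MUSB_ULPI_* viewport registers let software reach ULPI PHY registers through the core: write the PHY register address, kick a request via MUSB_ULPI_REG_CONTROL, wait for MUSB_ULPI_REG_CMPLT, then read the data register. A plausible read sequence as a sketch (the real consumer lives in musb_core.c; no timeout handling here, and the helper name is illustrative):

static u8 sketch_ulpi_read(void __iomem *mbase, u8 ulpi_reg)
{
	musb_writeb(mbase, MUSB_ULPI_REG_ADDR, ulpi_reg);
	musb_writeb(mbase, MUSB_ULPI_REG_CONTROL,
		    MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);	/* request a read */

	/* a real caller would bound this loop with a timeout */
	while (!(musb_readb(mbase, MUSB_ULPI_REG_CONTROL) & MUSB_ULPI_REG_CMPLT))
		cpu_relax();

	return musb_readb(mbase, MUSB_ULPI_REG_DATA);
}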
@@ -280,7 +296,8 @@
#define MUSB_FLAT_OFFSET(_epnum, _offset) \
(0x100 + (0x10*(_epnum)) + (_offset))
-#ifdef CONFIG_USB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
/* TUSB6010 EP0 configuration register is special */
#define MUSB_TUSB_OFFSET(_epnum, _offset) \
(0x10 + _offset)
@@ -321,8 +338,39 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
musb_writew(mbase, MUSB_RXFIFOADD, c_off);
}
+static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val)
+{
+ musb_writeb(mbase, MUSB_ULPI_BUSCONTROL, val);
+}
+
+static inline u8 musb_read_txfifosz(void __iomem *mbase)
+{
+ return musb_readb(mbase, MUSB_TXFIFOSZ);
+}
+
+static inline u16 musb_read_txfifoadd(void __iomem *mbase)
+{
+ return musb_readw(mbase, MUSB_TXFIFOADD);
+}
+
+static inline u8 musb_read_rxfifosz(void __iomem *mbase)
+{
+ return musb_readb(mbase, MUSB_RXFIFOSZ);
+}
+
+static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
+{
+ return musb_readw(mbase, MUSB_RXFIFOADD);
+}
+
+static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase)
+{
+ return musb_readb(mbase, MUSB_ULPI_BUSCONTROL);
+}
+
static inline u8 musb_read_configdata(void __iomem *mbase)
{
+ musb_writeb(mbase, MUSB_INDEX, 0);
return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
}
@@ -375,6 +423,36 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
qh_h_port_reg);
}
+static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXFUNCADDR));
+}
+
+static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR));
+}
+
+static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBPORT));
+}
+
+static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR));
+}
+
+static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR));
+}
+
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT));
+}
+
#else /* CONFIG_BLACKFIN */
#define USB_BASE USB_FADDR
@@ -435,18 +513,9 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
#define MUSB_FLAT_OFFSET(_epnum, _offset) \
(USB_OFFSET(USB_EP_NI0_TXMAXP) + (0x40 * (_epnum)) + (_offset))
-/* Not implemented - HW has seperate Tx/Rx FIFO */
+/* Not implemented - HW has separate Tx/Rx FIFO */
#define MUSB_TXCSR_MODE 0x0000
-/*
- * Dummy stub for clk framework, it will be removed
- * until Blackfin supports clk framework
- */
-#define clk_get(dev, id) NULL
-#define clk_put(clock) do {} while (0)
-#define clk_enable(clock) do {} while (0)
-#define clk_disable(clock) do {} while (0)
-
static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size)
{
}
@@ -463,21 +532,54 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
{
}
-static inline u8 musb_read_configdata(void __iomem *mbase)
+static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val)
+{
+}
+
+static inline u8 musb_read_txfifosz(void __iomem *mbase)
{
return 0;
}
-static inline u16 musb_read_hwvers(void __iomem *mbase)
+static inline u16 musb_read_txfifoadd(void __iomem *mbase)
{
return 0;
}
-static inline u16 musb_read_target_reg_base(u8 i, void __iomem *mbase)
+static inline u8 musb_read_rxfifosz(void __iomem *mbase)
{
return 0;
}
+static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
+{
+ return 0;
+}
+
+static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase)
+{
+ return 0;
+}
+
+static inline u8 musb_read_configdata(void __iomem *mbase)
+{
+ return 0;
+}
+
+static inline u16 musb_read_hwvers(void __iomem *mbase)
+{
+ /*
+	 * This register is invisible on Blackfin; the MUSB RTL version
+	 * on Blackfin is 1.9, so just hardcode its value.
+ */
+ return MUSB_HWVERS_1900;
+}
+
+static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
+{
+ return NULL;
+}
+
static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs,
u8 qh_addr_req)
{
@@ -508,6 +610,36 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
{
}
+static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
+{
+ return 0;
+}
+
#endif /* CONFIG_BLACKFIN */
#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index e0e9ce58417..e2d2d8c9891 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -35,9 +35,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/time.h>
#include <linux/timer.h>
@@ -45,9 +43,40 @@
#include "musb_core.h"
+void musb_host_finish_resume(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
+ u8 power;
+
+ musb = container_of(work, struct musb, finish_resume_work.work);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ dev_dbg(musb->controller, "root port resume stopped, power %02x\n",
+ power);
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+
+ /*
+ * ISSUE: DaVinci (RTL 1.300) disconnects after
+ * resume of high speed peripherals (but not full
+ * speed ones).
+ */
+ musb->is_active = 1;
+ musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | MUSB_PORT_STAT_RESUME);
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb->hcd);
+ /* NOTE: it might really be A_WAIT_BCON ... */
+ musb->xceiv->state = OTG_STATE_A_HOST;
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
-static void musb_port_suspend(struct musb *musb, bool do_suspend)
+void musb_port_suspend(struct musb *musb, bool do_suspend)
{
+ struct usb_otg *otg = musb->xceiv->otg;
u8 power;
void __iomem *mbase = musb->mregs;
@@ -75,53 +104,52 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend)
break;
}
- DBG(3, "Root port suspended, power %02x\n", power);
+ dev_dbg(musb->controller, "Root port suspended, power %02x\n", power);
musb->port1_status |= USB_PORT_STAT_SUSPEND;
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_HOST:
- musb->xceiv.state = OTG_STATE_A_SUSPEND;
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv.host->b_hnp_enable;
+ musb->xceiv->state = OTG_STATE_A_SUSPEND;
+ musb->is_active = otg->host->b_hnp_enable;
+ if (musb->is_active)
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(
+ OTG_TIME_A_AIDL_BDIS));
musb_platform_try_idle(musb, 0);
break;
-#ifdef CONFIG_USB_MUSB_OTG
case OTG_STATE_B_HOST:
- musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv.host->b_hnp_enable;
+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+ musb->is_active = otg->host->b_hnp_enable;
musb_platform_try_idle(musb, 0);
break;
-#endif
default:
- DBG(1, "bogus rh suspend? %s\n",
- otg_state_string(musb));
+ dev_dbg(musb->controller, "bogus rh suspend? %s\n",
+ usb_otg_state_string(musb->xceiv->state));
}
} else if (power & MUSB_POWER_SUSPENDM) {
power &= ~MUSB_POWER_SUSPENDM;
power |= MUSB_POWER_RESUME;
musb_writeb(mbase, MUSB_POWER, power);
- DBG(3, "Root port resuming, power %02x\n", power);
+ dev_dbg(musb->controller, "Root port resuming, power %02x\n", power);
/* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
- musb->rh_timer = jiffies + msecs_to_jiffies(20);
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(20));
}
}
-static void musb_port_reset(struct musb *musb, bool do_reset)
+void musb_port_reset(struct musb *musb, bool do_reset)
{
u8 power;
void __iomem *mbase = musb->mregs;
-#ifdef CONFIG_USB_MUSB_OTG
- if (musb->xceiv.state == OTG_STATE_B_IDLE) {
- DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
+ if (musb->xceiv->state == OTG_STATE_B_IDLE) {
+ dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n");
musb->port1_status &= ~USB_PORT_STAT_RESET;
return;
}
-#endif
if (!is_host_active(musb))
return;
@@ -131,7 +159,6 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
*/
power = musb_readb(mbase, MUSB_POWER);
if (do_reset) {
-
/*
* If RESUME is set, we must make sure it stays minimum 20 ms.
* Then we must clear RESUME and wait a bit to let musb start
@@ -140,31 +167,40 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
* detected".
*/
if (power & MUSB_POWER_RESUME) {
- while (time_before(jiffies, musb->rh_timer))
- msleep(1);
+ long remain = (unsigned long) musb->rh_timer - jiffies;
+
+ if (musb->rh_timer > 0 && remain > 0) {
+ /* take into account the minimum delay after resume */
+ schedule_delayed_work(
+ &musb->deassert_reset_work, remain);
+ return;
+ }
+
musb_writeb(mbase, MUSB_POWER,
- power & ~MUSB_POWER_RESUME);
- msleep(1);
+ power & ~MUSB_POWER_RESUME);
+
+ /* Give the core 1 ms to clear MUSB_POWER_RESUME */
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(1));
+ return;
}
- musb->ignore_disconnect = true;
power &= 0xf0;
musb_writeb(mbase, MUSB_POWER,
power | MUSB_POWER_RESET);
musb->port1_status |= USB_PORT_STAT_RESET;
musb->port1_status &= ~USB_PORT_STAT_ENABLE;
- musb->rh_timer = jiffies + msecs_to_jiffies(50);
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(50));
} else {
- DBG(4, "root port reset stopped\n");
+ dev_dbg(musb->controller, "root port reset stopped\n");
musb_writeb(mbase, MUSB_POWER,
power & ~MUSB_POWER_RESET);
- musb->ignore_disconnect = false;
-
power = musb_readb(mbase, MUSB_POWER);
if (power & MUSB_POWER_HSMODE) {
- DBG(4, "high-speed device connected\n");
+ dev_dbg(musb->controller, "high-speed device connected\n");
musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
}
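
Both delayed paths above rely on musb->deassert_reset_work to finish the job once the required 20 ms of resume signalling (or the 50 ms of reset) have elapsed. The handler itself is expected to live in musb_core.c; a minimal sketch of such a worker, assuming that wiring:

static void sketch_deassert_reset(struct work_struct *work)
{
	struct musb *musb = container_of(work, struct musb,
					 deassert_reset_work.work);
	unsigned long flags;

	spin_lock_irqsave(&musb->lock, flags);

	/* finish the port reset started by musb_port_reset(musb, true) */
	if (musb->port1_status & USB_PORT_STAT_RESET)
		musb_port_reset(musb, false);

	spin_unlock_irqrestore(&musb->lock, flags);
}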
@@ -172,7 +208,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
musb->port1_status |= USB_PORT_STAT_ENABLE
| (USB_PORT_STAT_C_RESET << 16)
| (USB_PORT_STAT_C_ENABLE << 16);
- usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ usb_hcd_poll_rh_status(musb->hcd);
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
}
@@ -180,23 +216,32 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
void musb_root_disconnect(struct musb *musb)
{
- musb->port1_status = (1 << USB_PORT_FEAT_POWER)
- | (1 << USB_PORT_FEAT_C_CONNECTION);
+ struct usb_otg *otg = musb->xceiv->otg;
- usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ musb->port1_status = USB_PORT_STAT_POWER
+ | (USB_PORT_STAT_C_CONNECTION << 16);
+
+ usb_hcd_poll_rh_status(musb->hcd);
musb->is_active = 0;
- switch (musb->xceiv.state) {
- case OTG_STATE_A_HOST:
+ switch (musb->xceiv->state) {
case OTG_STATE_A_SUSPEND:
- musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ if (otg->host->b_hnp_enable) {
+ musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
+ musb->g.is_a_peripheral = 1;
+ break;
+ }
+ /* FALLTHROUGH */
+ case OTG_STATE_A_HOST:
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
musb->is_active = 0;
break;
case OTG_STATE_A_WAIT_VFALL:
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
break;
default:
- DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
+ dev_dbg(musb->controller, "host disconnect (%s)\n",
+ usb_otg_state_string(musb->xceiv->state));
}
}
@@ -217,6 +262,23 @@ int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
return retval;
}
+static int musb_has_gadget(struct musb *musb)
+{
+ /*
+ * In host-only mode we start a connection right away. In OTG mode
+ * we have to wait until we loaded a gadget. We don't really need a
+ * gadget if we operate as a host but we should not start a session
+ * as a device without a gadget or else we explode.
+ */
+#ifdef CONFIG_USB_MUSB_HOST
+ return 1;
+#else
+ if (musb->port_mode == MUSB_PORT_MODE_HOST)
+ return 1;
+ return musb->g.dev.driver != NULL;
+#endif
+}
+
int musb_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
@@ -232,7 +294,7 @@ int musb_hub_control(
spin_lock_irqsave(&musb->lock, flags);
- if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
spin_unlock_irqrestore(&musb->lock, flags);
return -ESHUTDOWN;
}
@@ -263,8 +325,8 @@ int musb_hub_control(
musb_port_suspend(musb, false);
break;
case USB_PORT_FEAT_POWER:
- if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
- musb_set_vbus(musb, 0);
+ if (!hcd->self.is_b_host)
+ musb_platform_set_vbus(musb, 0);
break;
case USB_PORT_FEAT_C_CONNECTION:
case USB_PORT_FEAT_C_ENABLE:
@@ -275,7 +337,7 @@ int musb_hub_control(
default:
goto error;
}
- DBG(5, "clear feature %d\n", wValue);
+ dev_dbg(musb->controller, "clear feature %d\n", wValue);
musb->port1_status &= ~(1 << wValue);
break;
case GetHubDescriptor:
@@ -285,7 +347,7 @@ int musb_hub_control(
desc->bDescLength = 9;
desc->bDescriptorType = 0x29;
desc->bNbrPorts = 1;
- desc->wHubCharacteristics = __constant_cpu_to_le16(
+ desc->wHubCharacteristics = cpu_to_le16(
0x0001 /* per-port power switching */
| 0x0010 /* no overcurrent reporting */
);
@@ -293,8 +355,8 @@ int musb_hub_control(
desc->bHubContrCurrent = 0;
/* workaround bogus struct definition */
- desc->DeviceRemovable[0] = 0x02; /* port 1 */
- desc->DeviceRemovable[1] = 0xff;
+ desc->u.hs.DeviceRemovable[0] = 0x02; /* port 1 */
+ desc->u.hs.DeviceRemovable[1] = 0xff;
}
break;
case GetHubStatus:
@@ -305,42 +367,12 @@ int musb_hub_control(
if (wIndex != 1)
goto error;
- /* finish RESET signaling? */
- if ((musb->port1_status & USB_PORT_STAT_RESET)
- && time_after_eq(jiffies, musb->rh_timer))
- musb_port_reset(musb, false);
-
- /* finish RESUME signaling? */
- if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
- && time_after_eq(jiffies, musb->rh_timer)) {
- u8 power;
-
- power = musb_readb(musb->mregs, MUSB_POWER);
- power &= ~MUSB_POWER_RESUME;
- DBG(4, "root port resume stopped, power %02x\n",
- power);
- musb_writeb(musb->mregs, MUSB_POWER, power);
-
- /* ISSUE: DaVinci (RTL 1.300) disconnects after
- * resume of high speed peripherals (but not full
- * speed ones).
- */
-
- musb->is_active = 1;
- musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
- | MUSB_PORT_STAT_RESUME);
- musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
- usb_hcd_poll_rh_status(musb_to_hcd(musb));
- /* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv.state = OTG_STATE_A_HOST;
- }
-
put_unaligned(cpu_to_le32(musb->port1_status
& ~MUSB_PORT_STAT_RESUME),
(__le32 *) buf);
/* port change status is more interesting */
- DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
+ dev_dbg(musb->controller, "port status %08x\n",
musb->port1_status);
break;
case SetPortFeature:
@@ -359,7 +391,7 @@ int musb_hub_control(
* initialization logic, e.g. for OTG, or change any
* logic relating to VBUS power-up.
*/
- if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+ if (!hcd->self.is_b_host && musb_has_gadget(musb))
musb_start(musb);
break;
case USB_PORT_FEAT_RESET:
@@ -411,7 +443,7 @@ int musb_hub_control(
default:
goto error;
}
- DBG(5, "set feature %d\n", wValue);
+ dev_dbg(musb->controller, "set feature %d\n", wValue);
musb->port1_status |= 1 << wValue;
break;
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 8662e9e159c..e8e9f9aab20 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -33,21 +33,14 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "musb_core.h"
#include "musbhsdma.h"
-static int dma_controller_start(struct dma_controller *c)
-{
- /* nothing to do */
- return 0;
-}
-
static void dma_channel_release(struct dma_channel *channel);
-static int dma_controller_stop(struct dma_controller *c)
+static void dma_controller_stop(struct musb_dma_controller *controller)
{
- struct musb_dma_controller *controller = container_of(c,
- struct musb_dma_controller, controller);
struct musb *musb = controller->private_data;
struct dma_channel *channel;
u8 bit;
@@ -66,8 +59,6 @@ static int dma_controller_stop(struct dma_controller *c)
}
}
}
-
- return 0;
}
static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
@@ -90,7 +81,7 @@ static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
channel = &(musb_channel->channel);
channel->private_data = musb_channel;
channel->status = MUSB_DMA_STATUS_FREE;
- channel->max_len = 0x10000;
+ channel->max_len = 0x100000;
/* Tx => mode 1; Rx => mode 0 */
channel->desired_mode = transmit;
channel->actual_len = 0;
@@ -121,28 +112,20 @@ static void configure_channel(struct dma_channel *channel,
{
struct musb_dma_channel *musb_channel = channel->private_data;
struct musb_dma_controller *controller = musb_channel->controller;
+ struct musb *musb = controller->private_data;
void __iomem *mbase = controller->base;
u8 bchannel = musb_channel->idx;
u16 csr = 0;
- DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
+ dev_dbg(musb->controller, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
channel, packet_sz, dma_addr, len, mode);
if (mode) {
csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
BUG_ON(len < packet_sz);
-
- if (packet_sz >= 64) {
- csr |= MUSB_HSDMA_BURSTMODE_INCR16
- << MUSB_HSDMA_BURSTMODE_SHIFT;
- } else if (packet_sz >= 32) {
- csr |= MUSB_HSDMA_BURSTMODE_INCR8
- << MUSB_HSDMA_BURSTMODE_SHIFT;
- } else if (packet_sz >= 16) {
- csr |= MUSB_HSDMA_BURSTMODE_INCR4
- << MUSB_HSDMA_BURSTMODE_SHIFT;
- }
}
+ csr |= MUSB_HSDMA_BURSTMODE_INCR16
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
| (1 << MUSB_HSDMA_ENABLE_SHIFT)
@@ -166,8 +149,10 @@ static int dma_channel_program(struct dma_channel *channel,
dma_addr_t dma_addr, u32 len)
{
struct musb_dma_channel *musb_channel = channel->private_data;
+ struct musb_dma_controller *controller = musb_channel->controller;
+ struct musb *musb = controller->private_data;
- DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
+ dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
musb_channel->epnum,
musb_channel->transmit ? "Tx" : "Rx",
packet_sz, dma_addr, len, mode);
@@ -175,16 +160,33 @@ static int dma_channel_program(struct dma_channel *channel,
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
+ /* Let targets check/tweak the arguments */
+ if (musb->ops->adjust_channel_params) {
+ int ret = musb->ops->adjust_channel_params(channel,
+ packet_sz, &mode, &dma_addr, &len);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * The DMA engine in RTL1.8 and above cannot handle
+ * DMA addresses that are not aligned to a 4 byte boundary.
+ * It ends up masking the last two bits of the address
+ * programmed in DMA_ADDR.
+ *
+ * Fail such DMA transfers, so that the backup PIO mode
+ * can carry out the transfer
+ */
+ if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
+ return false;
+
channel->actual_len = 0;
musb_channel->start_addr = dma_addr;
musb_channel->len = len;
musb_channel->max_packet_sz = packet_sz;
channel->status = MUSB_DMA_STATUS_BUSY;
- if ((mode == 1) && (len >= packet_sz))
- configure_channel(channel, packet_sz, 1, dma_addr, len);
- else
- configure_channel(channel, packet_sz, 0, dma_addr, len);
+ configure_channel(channel, packet_sz, mode, dma_addr, len);
return true;
}
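
dma_channel_program() now refuses work it cannot do, either because a platform adjust_channel_params() hook rejected it or because the buffer is unaligned on RTL 1.8 and above, and it signals that by returning false rather than an error code. Callers are expected to fall back to PIO; a sketch of that caller-side pattern (helper name illustrative):

static void sketch_start_dma_or_pio(struct musb *musb, struct dma_channel *ch,
				    u16 packet_sz, u8 mode,
				    dma_addr_t dma_addr, u32 len)
{
	struct dma_controller *c = musb->dma_controller;

	if (!c->channel_program(ch, packet_sz, mode, dma_addr, len)) {
		/* engine refused (e.g. unaligned address): release the
		 * channel and let the PIO path move the data instead.
		 */
		c->channel_release(ch);
		/* ... continue via musb_write_fifo()/musb_read_fifo() ... */
	}
}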
@@ -195,30 +197,32 @@ static int dma_channel_abort(struct dma_channel *channel)
void __iomem *mbase = musb_channel->controller->base;
u8 bchannel = musb_channel->idx;
+ int offset;
u16 csr;
if (channel->status == MUSB_DMA_STATUS_BUSY) {
if (musb_channel->transmit) {
-
- csr = musb_readw(mbase,
- MUSB_EP_OFFSET(musb_channel->epnum,
- MUSB_TXCSR));
- csr &= ~(MUSB_TXCSR_AUTOSET |
- MUSB_TXCSR_DMAENAB |
- MUSB_TXCSR_DMAMODE);
- musb_writew(mbase,
- MUSB_EP_OFFSET(musb_channel->epnum, MUSB_TXCSR),
- csr);
+ offset = MUSB_EP_OFFSET(musb_channel->epnum,
+ MUSB_TXCSR);
+
+ /*
+ * The programming guide says that we must clear
+ * the DMAENAB bit before the DMAMODE bit...
+ */
+ csr = musb_readw(mbase, offset);
+ csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
+ musb_writew(mbase, offset, csr);
+ csr &= ~MUSB_TXCSR_DMAMODE;
+ musb_writew(mbase, offset, csr);
} else {
- csr = musb_readw(mbase,
- MUSB_EP_OFFSET(musb_channel->epnum,
- MUSB_RXCSR));
+ offset = MUSB_EP_OFFSET(musb_channel->epnum,
+ MUSB_RXCSR);
+
+ csr = musb_readw(mbase, offset);
csr &= ~(MUSB_RXCSR_AUTOCLEAR |
MUSB_RXCSR_DMAENAB |
MUSB_RXCSR_DMAMODE);
- musb_writew(mbase,
- MUSB_EP_OFFSET(musb_channel->epnum, MUSB_RXCSR),
- csr);
+ musb_writew(mbase, offset, csr);
}
musb_writew(mbase,
@@ -248,14 +252,38 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
u8 bchannel;
u8 int_hsdma;
- u32 addr;
+ u32 addr, count;
u16 csr;
spin_lock_irqsave(&musb->lock, flags);
int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
- if (!int_hsdma)
- goto done;
+
+#ifdef CONFIG_BLACKFIN
+ /* Clear DMA interrupt flags */
+ musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
+#endif
+
+ if (!int_hsdma) {
+ dev_dbg(musb->controller, "spurious DMA irq\n");
+
+ for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
+ musb_channel = (struct musb_dma_channel *)
+ &(controller->channel[bchannel]);
+ channel = &musb_channel->channel;
+ if (channel->status == MUSB_DMA_STATUS_BUSY) {
+ count = musb_read_hsdma_count(mbase, bchannel);
+
+ if (count == 0)
+ int_hsdma |= (1 << bchannel);
+ }
+ }
+
+ dev_dbg(musb->controller, "int_hsdma = 0x%x\n", int_hsdma);
+
+ if (!int_hsdma)
+ goto done;
+ }
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
if (int_hsdma & (1 << bchannel)) {
@@ -278,7 +306,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
channel->actual_len = addr
- musb_channel->start_addr;
- DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
+ dev_dbg(musb->controller, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n",
channel, musb_channel->start_addr,
addr, channel->actual_len,
musb_channel->len,
@@ -296,29 +324,32 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
&& ((channel->desired_mode == 0)
|| (channel->actual_len &
(musb_channel->max_packet_sz - 1)))
- ) {
+ ) {
+ u8 epnum = musb_channel->epnum;
+ int offset = MUSB_EP_OFFSET(epnum,
+ MUSB_TXCSR);
+ u16 txcsr;
+
+ /*
+ * The programming guide says that we
+ * must clear DMAENAB before DMAMODE.
+ */
+ musb_ep_select(mbase, epnum);
+ txcsr = musb_readw(mbase, offset);
+ txcsr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_AUTOSET);
+ musb_writew(mbase, offset, txcsr);
/* Send out the packet */
- musb_ep_select(mbase,
- musb_channel->epnum);
- musb_writew(mbase, MUSB_EP_OFFSET(
- musb_channel->epnum,
- MUSB_TXCSR),
- MUSB_TXCSR_TXPKTRDY);
- } else {
- musb_dma_completion(
- musb,
- musb_channel->epnum,
- musb_channel->transmit);
+ txcsr &= ~MUSB_TXCSR_DMAMODE;
+ txcsr |= MUSB_TXCSR_TXPKTRDY;
+ musb_writew(mbase, offset, txcsr);
}
+ musb_dma_completion(musb, musb_channel->epnum,
+ musb_channel->transmit);
}
}
}
-#ifdef CONFIG_BLACKFIN
- /* Clear DMA interrup flags */
- musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
-#endif
-
retval = IRQ_HANDLED;
done:
spin_unlock_irqrestore(&musb->lock, flags);
@@ -330,8 +361,7 @@ void dma_controller_destroy(struct dma_controller *c)
struct musb_dma_controller *controller = container_of(c,
struct musb_dma_controller, controller);
- if (!controller)
- return;
+ dma_controller_stop(controller);
if (controller->irq)
free_irq(controller->irq, c);
@@ -339,15 +369,14 @@ void dma_controller_destroy(struct dma_controller *c)
kfree(controller);
}
-struct dma_controller *__init
-dma_controller_create(struct musb *musb, void __iomem *base)
+struct dma_controller *dma_controller_create(struct musb *musb, void __iomem *base)
{
struct musb_dma_controller *controller;
struct device *dev = musb->controller;
struct platform_device *pdev = to_platform_device(dev);
- int irq = platform_get_irq(pdev, 1);
+ int irq = platform_get_irq_byname(pdev, "dma");
- if (irq == 0) {
+ if (irq <= 0) {
dev_err(dev, "No DMA interrupt line!\n");
return NULL;
}
@@ -360,14 +389,12 @@ dma_controller_create(struct musb *musb, void __iomem *base)
controller->private_data = musb;
controller->base = base;
- controller->controller.start = dma_controller_start;
- controller->controller.stop = dma_controller_stop;
controller->controller.channel_alloc = dma_channel_allocate;
controller->controller.channel_release = dma_channel_release;
controller->controller.channel_program = dma_channel_program;
controller->controller.channel_abort = dma_channel_abort;
- if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
+ if (request_irq(irq, dma_controller_irq, 0,
dev_name(musb->controller), &controller->controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
dma_controller_destroy(&controller->controller);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 1299d92dc83..f7b13fd2525 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -31,10 +31,6 @@
*
*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
-#include "omap2430.h"
-#endif
-
#ifndef CONFIG_BLACKFIN
#define MUSB_HSDMA_BASE 0x200
@@ -55,6 +51,10 @@
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
addr)
+#define musb_read_hsdma_count(mbase, bchannel) \
+ musb_readl(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT))
+
#define musb_write_hsdma_count(mbase, bchannel, len) \
musb_writel(mbase, \
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
@@ -90,21 +90,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
{
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
- ((u16)((u32) dma_addr & 0xFFFF)));
+ dma_addr);
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
- ((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+ (dma_addr >> 16));
+}
+
+static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
+{
+ u32 count = musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+ count = count << 16;
+
+ count |= musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+ return count;
}
static inline void musb_write_hsdma_count(void __iomem *mbase,
u8 bchannel, u32 len)
{
musb_writew(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),
- ((u16)((u32) len & 0xFFFF)));
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW), len);
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
- ((u16)(((u32) len >> 16) & 0xFFFF)));
+ (len >> 16));
}
#endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 901dffdf23b..d369bf1f393 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -3,7 +3,6 @@
* Some code has been taken from tusb6010.c
* Copyrights for that are attributable to:
* Copyright (C) 2006 Nokia Corporation
- * Jarkko Nikula <jarkko.nikula@nokia.com>
* Tony Lindgren <tony@atomide.com>
*
* This file is part of the Inventra Controller Driver for Linux.
@@ -28,24 +27,32 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/clk.h>
#include <linux/io.h>
-
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
-#include <mach/mux.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/usb/musb-omap.h>
+#include <linux/phy/omap_control_phy.h>
+#include <linux/of_platform.h>
#include "musb_core.h"
#include "omap2430.h"
-#ifdef CONFIG_ARCH_OMAP3430
-#define get_cpu_rev() 2
-#endif
+struct omap2430_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ enum omap_musb_vbus_id_status status;
+ struct work_struct omap_musb_mailbox_work;
+ struct device *control_otghs;
+};
+#define glue_to_musb(g) platform_get_drvdata(g->musb)
-#define MUSB_TIMEOUT_A_WAIT_BCON 1100
+static struct omap2430_glue *_glue;
static struct timer_list musb_idle_timer;
@@ -53,55 +60,45 @@ static void musb_do_idle(unsigned long _musb)
{
struct musb *musb = (void *)_musb;
unsigned long flags;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
u8 power;
-#endif
u8 devctl;
spin_lock_irqsave(&musb->lock, flags);
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_BCON:
- devctl &= ~MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
} else {
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
break;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case OTG_STATE_A_SUSPEND:
/* finish RESUME signaling? */
if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
power = musb_readb(musb->mregs, MUSB_POWER);
power &= ~MUSB_POWER_RESUME;
- DBG(1, "root port resume stopped, power %02x\n", power);
+ dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
musb_writeb(musb->mregs, MUSB_POWER, power);
musb->is_active = 1;
musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
| MUSB_PORT_STAT_RESUME);
musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
- usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ usb_hcd_poll_rh_status(musb->hcd);
/* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->xceiv->state = OTG_STATE_A_HOST;
}
break;
-#endif
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case OTG_STATE_A_HOST:
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
else
- musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
-#endif
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
default:
break;
}
@@ -109,7 +106,7 @@ static void musb_do_idle(unsigned long _musb)
}
-void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
{
unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
static unsigned long last_timer;
@@ -119,8 +116,9 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
/* Never idle if active, or when VBUS timeout is not set as host */
if (musb->is_active || ((musb->a_wait_bcon == 0)
- && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
- DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+ && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->state));
del_timer(&musb_idle_timer);
last_timer = jiffies;
return;
@@ -130,31 +128,23 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
if (!timer_pending(&musb_idle_timer))
last_timer = timeout;
else {
- DBG(4, "Longer idle timer already pending, ignoring\n");
+ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
return;
}
}
last_timer = timeout;
- DBG(4, "%s inactive, for idle timer for %lu ms\n",
- otg_state_string(musb),
+ dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
+ usb_otg_state_string(musb->xceiv->state),
(unsigned long)jiffies_to_msecs(timeout - jiffies));
mod_timer(&musb_idle_timer, timeout);
}
-void musb_platform_enable(struct musb *musb)
-{
-}
-void musb_platform_disable(struct musb *musb)
-{
-}
-static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
-{
-}
-
-static void omap_set_vbus(struct musb *musb, int is_on)
+static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
{
+ struct usb_otg *otg = musb->xceiv->otg;
u8 devctl;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
/* HDRC controls CPEN, but beware current surges during device
* connect. They can trigger transient overcurrent conditions
* that must be ignored.
@@ -163,12 +153,36 @@ static void omap_set_vbus(struct musb *musb, int is_on)
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (is_on) {
- musb->is_active = 1;
- musb->xceiv.default_a = 1;
- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
- devctl |= MUSB_DEVCTL_SESSION;
-
- MUSB_HST_MODE(musb);
+ if (musb->xceiv->state == OTG_STATE_A_IDLE) {
+ int loops = 100;
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ /*
+			 * Wait for the musb to be configured as an A device
+			 * so that it can enable VBUS
+ */
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
+
+ mdelay(5);
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)
+ || loops-- <= 0) {
+ dev_err(musb->controller,
+ "configured as A device timeout");
+ break;
+ }
+ }
+
+ otg_set_vbus(otg, 1);
+ } else {
+ musb->is_active = 1;
+ otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+ MUSB_HST_MODE(musb);
+ }
} else {
musb->is_active = 0;
@@ -176,160 +190,547 @@ static void omap_set_vbus(struct musb *musb, int is_on)
* jumping right to B_IDLE...
*/
- musb->xceiv.default_a = 0;
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ otg->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
devctl &= ~MUSB_DEVCTL_SESSION;
MUSB_DEV_MODE(musb);
}
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- DBG(1, "VBUS %s, devctl %02x "
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x "
/* otg %3x conf %08x prcm %08x */ "\n",
- otg_state_string(musb),
+ usb_otg_state_string(musb->xceiv->state),
musb_readb(musb->mregs, MUSB_DEVCTL));
}
-static int omap_set_power(struct otg_transceiver *x, unsigned mA)
+
+static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
{
+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
return 0;
}
-static int musb_platform_resume(struct musb *musb);
+static inline void omap2430_low_level_exit(struct musb *musb)
+{
+ u32 l;
-int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+ /* in any role */
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
+ l |= ENABLEFORCE; /* enable MSTANDBY */
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
+}
+
+static inline void omap2430_low_level_init(struct musb *musb)
{
- u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ u32 l;
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
+ l &= ~ENABLEFORCE; /* disable MSTANDBY */
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
+}
+
+void omap_musb_mailbox(enum omap_musb_vbus_id_status status)
+{
+ struct omap2430_glue *glue = _glue;
+
+ if (!glue) {
+ pr_err("%s: musb core is not yet initialized\n", __func__);
+ return;
+ }
+ glue->status = status;
+
+ if (!glue_to_musb(glue)) {
+ pr_err("%s: musb core is not yet ready\n", __func__);
+ return;
+ }
- switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- case MUSB_HOST:
- otg_set_host(&musb->xceiv, musb->xceiv.host);
+ schedule_work(&glue->omap_musb_mailbox_work);
+}
+EXPORT_SYMBOL_GPL(omap_musb_mailbox);
+
+static void omap_musb_set_mailbox(struct omap2430_glue *glue)
+{
+ struct musb *musb = glue_to_musb(glue);
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = pdata->board_data;
+ struct usb_otg *otg = musb->xceiv->otg;
+
+ switch (glue->status) {
+ case OMAP_MUSB_ID_GROUND:
+ dev_dbg(dev, "ID GND\n");
+
+ otg->default_a = true;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ musb->xceiv->last_event = USB_EVENT_ID;
+ if (musb->gadget_driver) {
+ pm_runtime_get_sync(dev);
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_HOST);
+ omap2430_musb_set_vbus(musb, 1);
+ }
break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- case MUSB_PERIPHERAL:
- otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
+
+ case OMAP_MUSB_VBUS_VALID:
+ dev_dbg(dev, "VBUS Connect\n");
+
+ otg->default_a = false;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ musb->xceiv->last_event = USB_EVENT_VBUS;
+ if (musb->gadget_driver)
+ pm_runtime_get_sync(dev);
+ omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
- case MUSB_OTG:
+
+ case OMAP_MUSB_ID_FLOAT:
+ case OMAP_MUSB_VBUS_OFF:
+ dev_dbg(dev, "VBUS Disconnect\n");
+
+ musb->xceiv->last_event = USB_EVENT_NONE;
+ if (musb->gadget_driver) {
+ omap2430_musb_set_vbus(musb, 0);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ if (data->interface_type == MUSB_INTERFACE_UTMI)
+ otg_set_vbus(musb->xceiv->otg, 0);
+
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_DISCONNECT);
break;
-#endif
default:
- return -EINVAL;
+ dev_dbg(dev, "ID float\n");
}
- return 0;
+
+ atomic_notifier_call_chain(&musb->xceiv->notifier,
+ musb->xceiv->last_event, NULL);
}
-int __init musb_platform_init(struct musb *musb)
+
+static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
+{
+ struct omap2430_glue *glue = container_of(mailbox_work,
+ struct omap2430_glue, omap_musb_mailbox_work);
+ struct musb *musb = glue_to_musb(glue);
+ struct device *dev = musb->controller;
+
+ pm_runtime_get_sync(dev);
+ omap_musb_set_mailbox(glue);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static int omap2430_musb_init(struct musb *musb)
{
u32 l;
+ int status = 0;
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = plat->board_data;
+
+ /* We require some kind of external transceiver, hooked
+ * up through ULPI. TWL4030-family PMICs include one,
+ * which needs a driver; drivers aren't always needed.
+ */
+ if (dev->parent->of_node) {
+ musb->phy = devm_phy_get(dev->parent, "usb2-phy");
+
+ /* We can't totally remove musb->xceiv as of now because
+ * musb core uses xceiv.state and xceiv.otg. Once we have
+ * a separate state machine to handle otg, these can be moved
+ * out of xceiv and then we can start using the generic PHY
+ * framework.
+ */
+ musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent,
+ "usb-phy", 0);
+ } else {
+ musb->xceiv = devm_usb_get_phy_dev(dev, 0);
+ musb->phy = devm_phy_get(dev, "usb");
+ }
-#if defined(CONFIG_ARCH_OMAP2430)
- omap_cfg_reg(AE5_2430_USB0HS_STP);
-#endif
+ if (IS_ERR(musb->xceiv)) {
+ status = PTR_ERR(musb->xceiv);
- musb_platform_resume(musb);
+ if (status == -ENXIO)
+ return status;
- l = omap_readl(OTG_SYSCONFIG);
- l &= ~ENABLEWAKEUP; /* disable wakeup */
- l &= ~NOSTDBY; /* remove possible nostdby */
- l |= SMARTSTDBY; /* enable smart standby */
- l &= ~AUTOIDLE; /* disable auto idle */
- l &= ~NOIDLE; /* remove possible noidle */
- l |= SMARTIDLE; /* enable smart idle */
- l |= AUTOIDLE; /* enable auto idle */
- omap_writel(l, OTG_SYSCONFIG);
+ pr_err("HS USB OTG: no transceiver configured\n");
+ return -EPROBE_DEFER;
+ }
- l = omap_readl(OTG_INTERFSEL);
- l |= ULPI_12PIN;
- omap_writel(l, OTG_INTERFSEL);
+ if (IS_ERR(musb->phy)) {
+ pr_err("HS USB OTG: no PHY configured\n");
+ return PTR_ERR(musb->phy);
+ }
+ musb->isr = omap2430_musb_interrupt;
+
+ status = pm_runtime_get_sync(dev);
+ if (status < 0) {
+ dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
+ goto err1;
+ }
+
+ l = musb_readl(musb->mregs, OTG_INTERFSEL);
+
+ if (data->interface_type == MUSB_INTERFACE_UTMI) {
+ /* OMAP4 uses Internal PHY GS70 which uses UTMI interface */
+ l &= ~ULPI_12PIN; /* Disable ULPI */
+ l |= UTMI_8BIT; /* Enable UTMI */
+ } else {
+ l |= ULPI_12PIN;
+ }
+
+ musb_writel(musb->mregs, OTG_INTERFSEL, l);
pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
"sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
- omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
- omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
- omap_readl(OTG_SIMENABLE));
+ musb_readl(musb->mregs, OTG_REVISION),
+ musb_readl(musb->mregs, OTG_SYSCONFIG),
+ musb_readl(musb->mregs, OTG_SYSSTATUS),
+ musb_readl(musb->mregs, OTG_INTERFSEL),
+ musb_readl(musb->mregs, OTG_SIMENABLE));
- omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
+ setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
- if (is_host_enabled(musb))
- musb->board_set_vbus = omap_set_vbus;
- if (is_peripheral_enabled(musb))
- musb->xceiv.set_power = omap_set_power;
- musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
+ if (glue->status != OMAP_MUSB_UNKNOWN)
+ omap_musb_set_mailbox(glue);
- setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+ phy_init(musb->phy);
+ phy_power_on(musb->phy);
+ pm_runtime_put_noidle(musb->controller);
return 0;
+
+err1:
+ return status;
}
-int musb_platform_suspend(struct musb *musb)
+static void omap2430_musb_enable(struct musb *musb)
{
- u32 l;
+ u8 devctl;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = pdata->board_data;
+
+ switch (glue->status) {
+
+ case OMAP_MUSB_ID_GROUND:
+ omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST);
+ if (data->interface_type != MUSB_INTERFACE_UTMI)
+ break;
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) &
+ MUSB_DEVCTL_BDEVICE) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "configured as A device timeout");
+ break;
+ }
+ }
+ break;
- if (!musb->clock)
- return 0;
+ case OMAP_MUSB_VBUS_VALID:
+ omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
+ break;
- /* in any role */
- l = omap_readl(OTG_FORCESTDBY);
- l |= ENABLEFORCE; /* enable MSTANDBY */
- omap_writel(l, OTG_FORCESTDBY);
+ default:
+ break;
+ }
+}
+
+static void omap2430_musb_disable(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- l = omap_readl(OTG_SYSCONFIG);
- l |= ENABLEWAKEUP; /* enable wakeup */
- omap_writel(l, OTG_SYSCONFIG);
+ if (glue->status != OMAP_MUSB_UNKNOWN)
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_DISCONNECT);
+}
- if (musb->xceiv.set_suspend)
- musb->xceiv.set_suspend(&musb->xceiv, 1);
+static int omap2430_musb_exit(struct musb *musb)
+{
+ del_timer_sync(&musb_idle_timer);
- if (musb->set_clock)
- musb->set_clock(musb->clock, 0);
- else
- clk_disable(musb->clock);
+ omap2430_low_level_exit(musb);
+ phy_power_off(musb->phy);
+ phy_exit(musb->phy);
return 0;
}
-static int musb_platform_resume(struct musb *musb)
+static const struct musb_platform_ops omap2430_ops = {
+ .init = omap2430_musb_init,
+ .exit = omap2430_musb_exit,
+
+ .set_mode = omap2430_musb_set_mode,
+ .try_idle = omap2430_musb_try_idle,
+
+ .set_vbus = omap2430_musb_set_vbus,
+
+ .enable = omap2430_musb_enable,
+ .disable = omap2430_musb_disable,
+};
+
+static u64 omap2430_dmamask = DMA_BIT_MASK(32);
+
+static int omap2430_probe(struct platform_device *pdev)
{
- u32 l;
+ struct resource musb_resources[3];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct omap_musb_board_data *data;
+ struct platform_device *musb;
+ struct omap2430_glue *glue;
+ struct device_node *np = pdev->dev.of_node;
+ struct musb_hdrc_config *config;
+ int ret = -ENOMEM;
+
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&pdev->dev, "failed to allocate glue context\n");
+ goto err0;
+ }
- if (!musb->clock)
- return 0;
+ musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb) {
+ dev_err(&pdev->dev, "failed to allocate musb device\n");
+ goto err0;
+ }
- if (musb->xceiv.set_suspend)
- musb->xceiv.set_suspend(&musb->xceiv, 0);
+ musb->dev.parent = &pdev->dev;
+ musb->dev.dma_mask = &omap2430_dmamask;
+ musb->dev.coherent_dma_mask = omap2430_dmamask;
- if (musb->set_clock)
- musb->set_clock(musb->clock, 1);
- else
- clk_enable(musb->clock);
+ glue->dev = &pdev->dev;
+ glue->musb = musb;
+ glue->status = OMAP_MUSB_UNKNOWN;
+ glue->control_otghs = ERR_PTR(-ENODEV);
- l = omap_readl(OTG_SYSCONFIG);
- l &= ~ENABLEWAKEUP; /* disable wakeup */
- omap_writel(l, OTG_SYSCONFIG);
+ if (np) {
+ struct device_node *control_node;
+ struct platform_device *control_pdev;
- l = omap_readl(OTG_FORCESTDBY);
- l &= ~ENABLEFORCE; /* disable MSTANDBY */
- omap_writel(l, OTG_FORCESTDBY);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev,
+ "failed to allocate musb platform data\n");
+ goto err2;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev,
+ "failed to allocate musb board data\n");
+ goto err2;
+ }
+
+ config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
+ if (!config) {
+ dev_err(&pdev->dev,
+ "failed to allocate musb hdrc config\n");
+ goto err2;
+ }
+
+ of_property_read_u32(np, "mode", (u32 *)&pdata->mode);
+ of_property_read_u32(np, "interface-type",
+ (u32 *)&data->interface_type);
+ of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
+ of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
+ of_property_read_u32(np, "power", (u32 *)&pdata->power);
+ config->multipoint = of_property_read_bool(np, "multipoint");
+
+ pdata->board_data = data;
+ pdata->config = config;
+
+ control_node = of_parse_phandle(np, "ctrl-module", 0);
+ if (control_node) {
+ control_pdev = of_find_device_by_node(control_node);
+ if (!control_pdev) {
+ dev_err(&pdev->dev, "Failed to get control device\n");
+ ret = -EINVAL;
+ goto err2;
+ }
+ glue->control_otghs = &control_pdev->dev;
+ }
+ }
+ pdata->platform_ops = &omap2430_ops;
+
+ platform_set_drvdata(pdev, glue);
+
+ /*
+ * REVISIT if we ever have two instances of the wrapper, we will be
+ * in big trouble
+ */
+ _glue = glue;
+
+ INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work);
+
+ memset(musb_resources, 0x00, sizeof(*musb_resources) *
+ ARRAY_SIZE(musb_resources));
+
+ musb_resources[0].name = pdev->resource[0].name;
+ musb_resources[0].start = pdev->resource[0].start;
+ musb_resources[0].end = pdev->resource[0].end;
+ musb_resources[0].flags = pdev->resource[0].flags;
+
+ musb_resources[1].name = pdev->resource[1].name;
+ musb_resources[1].start = pdev->resource[1].start;
+ musb_resources[1].end = pdev->resource[1].end;
+ musb_resources[1].flags = pdev->resource[1].flags;
+
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
+ ret = platform_device_add_resources(musb, musb_resources,
+ ARRAY_SIZE(musb_resources));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add resources\n");
+ goto err2;
+ }
+
+ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add platform_data\n");
+ goto err2;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device\n");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ platform_device_put(musb);
+
+err0:
+ return ret;
+}
+
+static int omap2430_remove(struct platform_device *pdev)
+{
+ struct omap2430_glue *glue = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&glue->omap_musb_mailbox_work);
+ platform_device_unregister(glue->musb);
return 0;
}
+#ifdef CONFIG_PM
-int musb_platform_exit(struct musb *musb)
+static int omap2430_runtime_suspend(struct device *dev)
{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
- omap_vbus_power(musb, 0 /*off*/, 1);
+ if (musb) {
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
- musb_platform_suspend(musb);
+ omap2430_low_level_exit(musb);
+ }
- clk_put(musb->clock);
- musb->clock = 0;
+ return 0;
+}
+
+static int omap2430_runtime_resume(struct device *dev)
+{
+ struct omap2430_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ if (musb) {
+ omap2430_low_level_init(musb);
+ musb_writel(musb->mregs, OTG_INTERFSEL,
+ musb->context.otg_interfsel);
+ }
return 0;
}
+
+static struct dev_pm_ops omap2430_pm_ops = {
+ .runtime_suspend = omap2430_runtime_suspend,
+ .runtime_resume = omap2430_runtime_resume,
+};
+
+#define DEV_PM_OPS (&omap2430_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap2430_id_table[] = {
+ {
+ .compatible = "ti,omap4-musb"
+ },
+ {
+ .compatible = "ti,omap3-musb"
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap2430_id_table);
+#endif
+
+static struct platform_driver omap2430_driver = {
+ .probe = omap2430_probe,
+ .remove = omap2430_remove,
+ .driver = {
+ .name = "musb-omap2430",
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(omap2430_id_table),
+ },
+};
+
+MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+
+static int __init omap2430_init(void)
+{
+ return platform_driver_register(&omap2430_driver);
+}
+subsys_initcall(omap2430_init);
+
+static void __exit omap2430_exit(void)
+{
+ platform_driver_unregister(&omap2430_driver);
+}
+module_exit(omap2430_exit);
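
The omap2430 glue no longer polls the transceiver for cable state; VBUS/ID changes are pushed into the driver through omap_musb_mailbox(), which records the status and defers the actual mode switch to omap_musb_mailbox_work(). Below is a minimal sketch (not part of this patch) of the producer side, i.e. how an external PHY or charger driver might report such events. Only the omap_musb_mailbox() signature and the omap_musb_vbus_id_status values come from the diff above; the header path and the surrounding function are assumptions.

	#include <linux/types.h>
	#include <linux/usb/musb-omap.h>	/* assumed header declaring omap_musb_mailbox() */

	/* Hypothetical callback in a PHY/charger driver that has just
	 * detected a cable change; everything except omap_musb_mailbox()
	 * and the status constants is illustrative.
	 */
	static void example_report_cable_event(bool id_grounded, bool vbus_present)
	{
		if (id_grounded)
			omap_musb_mailbox(OMAP_MUSB_ID_GROUND);	/* host cable */
		else if (vbus_present)
			omap_musb_mailbox(OMAP_MUSB_VBUS_VALID);	/* device cable */
		else
			omap_musb_mailbox(OMAP_MUSB_VBUS_OFF);	/* nothing attached */
	}

Since omap_musb_mailbox() only stores the status and schedules the mailbox work item, callers in atomic context are fine; the pm_runtime and omap_control_usb handling happens later from the workqueue.
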
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index dc7670718cd..1b5e83a9840 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -10,47 +10,43 @@
#ifndef __MUSB_OMAP243X_H__
#define __MUSB_OMAP243X_H__
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
-#include <mach/hardware.h>
-#include <mach/usb.h>
+#include <linux/platform_data/usb-omap.h>
/*
* OMAP2430-specific definitions
*/
-#define MENTOR_BASE_OFFSET 0
-#if defined(CONFIG_ARCH_OMAP2430)
-#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE)
-#elif defined(CONFIG_ARCH_OMAP3430)
-#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
-#endif
-#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
-#define OTG_REVISION OMAP_HSOTG(0x0)
-#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
+#define OTG_REVISION 0x400
+
+#define OTG_SYSCONFIG 0x404
# define MIDLEMODE 12 /* bit position */
# define FORCESTDBY (0 << MIDLEMODE)
# define NOSTDBY (1 << MIDLEMODE)
# define SMARTSTDBY (2 << MIDLEMODE)
+
# define SIDLEMODE 3 /* bit position */
# define FORCEIDLE (0 << SIDLEMODE)
# define NOIDLE (1 << SIDLEMODE)
# define SMARTIDLE (2 << SIDLEMODE)
+
# define ENABLEWAKEUP (1 << 2)
# define SOFTRST (1 << 1)
# define AUTOIDLE (1 << 0)
-#define OTG_SYSSTATUS OMAP_HSOTG(0x8)
+
+#define OTG_SYSSTATUS 0x408
# define RESETDONE (1 << 0)
-#define OTG_INTERFSEL OMAP_HSOTG(0xc)
+
+#define OTG_INTERFSEL 0x40c
# define EXTCP (1 << 2)
-# define PHYSEL 0 /* bit position */
+# define PHYSEL 0 /* bit position */
# define UTMI_8BIT (0 << PHYSEL)
# define ULPI_12PIN (1 << PHYSEL)
# define ULPI_8PIN (2 << PHYSEL)
-#define OTG_SIMENABLE OMAP_HSOTG(0x10)
+
+#define OTG_SIMENABLE 0x410
# define TM1 (1 << 0)
-#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
-# define ENABLEFORCE (1 << 0)
-#endif /* CONFIG_ARCH_OMAP2430 */
+#define OTG_FORCESTDBY 0x414
+# define ENABLEFORCE (1 << 0)
#endif /* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 9e20fd070d7..159ef4be1ef 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -2,7 +2,6 @@
* TUSB6010 USB 2.0 OTG Dual Role controller
*
* Copyright (C) 2006 Nokia Corporation
- * Jarkko Nikula <jarkko.nikula@nokia.com>
* Tony Lindgren <tony@atomide.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -18,14 +17,24 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/prefetch.h>
#include <linux/usb.h>
#include <linux/irq.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/usb_phy_generic.h>
#include "musb_core.h"
-static void tusb_source_power(struct musb *musb, int is_on);
+struct tusb6010_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct platform_device *phy;
+};
+
+static void tusb_musb_set_vbus(struct musb *musb, int is_on);
#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf)
#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf)
@@ -34,7 +43,7 @@ static void tusb_source_power(struct musb *musb, int is_on);
* Checks the revision. We need to use the DMA register as 3.0 does not
* have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
*/
-u8 tusb_get_revision(struct musb *musb)
+static u8 tusb_get_revision(struct musb *musb)
{
void __iomem *tbase = musb->ctrl_base;
u32 die_id;
@@ -51,12 +60,12 @@ u8 tusb_get_revision(struct musb *musb)
return rev;
}
-static int __init tusb_print_revision(struct musb *musb)
+static void tusb_print_revision(struct musb *musb)
{
void __iomem *tbase = musb->ctrl_base;
u8 rev;
- rev = tusb_get_revision(musb);
+ rev = musb->tusb_revision;
pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
"prcm",
@@ -75,8 +84,6 @@ static int __init tusb_print_revision(struct musb *musb)
TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
"rev",
TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
-
- return tusb_get_revision(musb);
}
#define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
@@ -101,7 +108,7 @@ static void tusb_wbus_quirk(struct musb *musb, int enabled)
tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
- DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
+ dev_dbg(musb->controller, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
musb_readl(tbase, TUSB_PHY_OTG_CTRL),
musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
} else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
@@ -110,7 +117,7 @@ static void tusb_wbus_quirk(struct musb *musb, int enabled)
musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
- DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
+ dev_dbg(musb->controller, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
musb_readl(tbase, TUSB_PHY_OTG_CTRL),
musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
phy_otg_ctrl = 0;
@@ -145,7 +152,7 @@ tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
}
static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
- void __iomem *buf, u16 len)
+ void *buf, u16 len)
{
u32 val;
int i;
@@ -167,13 +174,14 @@ static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
{
+ struct musb *musb = hw_ep->musb;
void __iomem *ep_conf = hw_ep->conf;
void __iomem *fifo = hw_ep->fifo;
u8 epnum = hw_ep->epnum;
prefetch(buf);
- DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
'T', epnum, fifo, len, buf);
if (epnum)
@@ -188,7 +196,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
/* Best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) buf) == 0) {
if (len >= 4) {
- writesl(fifo, buf, len >> 2);
+ iowrite32_rep(fifo, buf, len >> 2);
buf += (len & ~0x03);
len &= 0x03;
}
@@ -216,11 +224,12 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
{
+ struct musb *musb = hw_ep->musb;
void __iomem *ep_conf = hw_ep->conf;
void __iomem *fifo = hw_ep->fifo;
u8 epnum = hw_ep->epnum;
- DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
'R', epnum, fifo, len, buf);
if (epnum)
@@ -234,7 +243,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
/* Best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) buf) == 0) {
if (len >= 4) {
- readsl(fifo, buf, len >> 2);
+ ioread32_rep(fifo, buf, len >> 2);
buf += (len & ~0x03);
len &= 0x03;
}
@@ -260,7 +269,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
tusb_fifo_read_unaligned(fifo, buf, len);
}
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static struct musb *the_musb;
/* This is used by gadget drivers, and OTG transceiver logic, allowing
* at most mA current to be drawn from VBUS during a Default-B session
@@ -268,23 +277,12 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
* mode), or low power Default-B sessions, something else supplies power.
* Caller must take care of locking.
*/
-static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
+static int tusb_draw_power(struct usb_phy *x, unsigned mA)
{
- struct musb *musb = container_of(x, struct musb, xceiv);
+ struct musb *musb = the_musb;
void __iomem *tbase = musb->ctrl_base;
u32 reg;
- /*
- * Keep clock active when enabled. Note that this is not tied to
- * drawing VBUS, as with OTG mA can be less than musb->min_power.
- */
- if (musb->set_clock) {
- if (mA)
- musb->set_clock(musb->clock, 1);
- else
- musb->set_clock(musb->clock, 0);
- }
-
/* tps65030 seems to consume max 100mA, with maybe 60mA available
* (measured on one board) for things other than tps and tusb.
*
@@ -295,7 +293,7 @@ static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
* The actual current usage would be very board-specific. For now,
* it's simpler to just use an aggregate (also board-specific).
*/
- if (x->default_a || mA < (musb->min_power << 1))
+ if (x->otg->default_a || mA < (musb->min_power << 1))
mA = 0;
reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
@@ -308,14 +306,10 @@ static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
}
musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
- DBG(2, "draw max %d mA VBUS\n", mA);
+ dev_dbg(musb->controller, "draw max %d mA VBUS\n", mA);
return 0;
}
-#else
-#define tusb_draw_power NULL
-#endif
-
/* workaround for issue 13: change clock during chip idle
* (to be fixed in rev3 silicon) ... symptoms include disconnect
* or looping suspend/resume cycles
@@ -347,13 +341,13 @@ static void tusb_set_clock_source(struct musb *musb, unsigned mode)
* USB link is not suspended ... and tells us the relevant wakeup
* events. SW_EN for voltage is handled separately.
*/
-void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
+static void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
{
void __iomem *tbase = musb->ctrl_base;
u32 reg;
if ((wakeup_enables & TUSB_PRCM_WBUS)
- && (tusb_get_revision(musb) == TUSB_REV_30))
+ && (musb->tusb_revision == TUSB_REV_30))
tusb_wbus_quirk(musb, 1);
tusb_set_clock_source(musb, 0);
@@ -378,13 +372,13 @@ void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
- DBG(6, "idle, wake on %02x\n", wakeup_enables);
+ dev_dbg(musb->controller, "idle, wake on %02x\n", wakeup_enables);
}
/*
* Updates cable VBUS status. Caller must take care of locking.
*/
-int musb_platform_get_vbus_status(struct musb *musb)
+static int tusb_musb_vbus_status(struct musb *musb)
{
void __iomem *tbase = musb->ctrl_base;
u32 otg_stat, prcm_mngmt;
@@ -420,17 +414,17 @@ static void musb_do_idle(unsigned long _musb)
spin_lock_irqsave(&musb->lock, flags);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_BCON:
if ((musb->a_wait_bcon != 0)
&& (musb->idle_timeout == 0
|| time_after(jiffies, musb->idle_timeout))) {
- DBG(4, "Nothing connected %s, turning off VBUS\n",
- otg_state_string(musb));
+ dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n",
+ usb_otg_state_string(musb->xceiv->state));
}
/* FALLTHROUGH */
case OTG_STATE_A_IDLE:
- tusb_source_power(musb, 0);
+ tusb_musb_set_vbus(musb, 0);
default:
break;
}
@@ -442,19 +436,14 @@ static void musb_do_idle(unsigned long _musb)
if (is_host_active(musb) && (musb->port1_status >> 16))
goto done;
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- if (is_peripheral_enabled(musb) && !musb->gadget_driver)
+ if (!musb->gadget_driver) {
wakeups = 0;
- else {
+ } else {
wakeups = TUSB_PRCM_WHOSTDISCON
- | TUSB_PRCM_WBUS
+ | TUSB_PRCM_WBUS
| TUSB_PRCM_WVBUS;
- if (is_otg_enabled(musb))
- wakeups |= TUSB_PRCM_WID;
+ wakeups |= TUSB_PRCM_WID;
}
-#else
- wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS;
-#endif
tusb_allow_idle(musb, wakeups);
}
done:
@@ -474,7 +463,7 @@ done:
* we don't want to treat that full speed J as a wakeup event.
* ... peripherals must draw only suspend current after 10 msec.
*/
-void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
{
unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
static unsigned long last_timer;
@@ -484,8 +473,9 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
/* Never idle if active, or when VBUS timeout is not set as host */
if (musb->is_active || ((musb->a_wait_bcon == 0)
- && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
- DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+ && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->state));
del_timer(&musb_idle_timer);
last_timer = jiffies;
return;
@@ -495,14 +485,14 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
if (!timer_pending(&musb_idle_timer))
last_timer = timeout;
else {
- DBG(4, "Longer idle timer already pending, ignoring\n");
+ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
return;
}
}
last_timer = timeout;
- DBG(4, "%s inactive, for idle timer for %lu ms\n",
- otg_state_string(musb),
+ dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
+ usb_otg_state_string(musb->xceiv->state),
(unsigned long)jiffies_to_msecs(timeout - jiffies));
mod_timer(&musb_idle_timer, timeout);
}
@@ -514,11 +504,12 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
| TUSB_DEV_OTG_TIMER_ENABLE) \
: 0)
-static void tusb_source_power(struct musb *musb, int is_on)
+static void tusb_musb_set_vbus(struct musb *musb, int is_on)
{
void __iomem *tbase = musb->ctrl_base;
u32 conf, prcm, timer;
u8 devctl;
+ struct usb_otg *otg = musb->xceiv->otg;
/* HDRC controls CPEN, but beware current surges during device
* connect. They can trigger transient overcurrent conditions
@@ -530,11 +521,9 @@ static void tusb_source_power(struct musb *musb, int is_on)
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (is_on) {
- if (musb->set_clock)
- musb->set_clock(musb->clock, 1);
timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
- musb->xceiv.default_a = 1;
- musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
devctl |= MUSB_DEVCTL_SESSION;
conf |= TUSB_DEV_CONF_USB_HOST_MODE;
@@ -547,31 +536,29 @@ static void tusb_source_power(struct musb *musb, int is_on)
/* If ID pin is grounded, we want to be a_idle */
otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_VRISE:
case OTG_STATE_A_WAIT_BCON:
- musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
break;
case OTG_STATE_A_WAIT_VFALL:
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
break;
default:
- musb->xceiv.state = OTG_STATE_A_IDLE;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
}
musb->is_active = 0;
- musb->xceiv.default_a = 1;
+ otg->default_a = 1;
MUSB_HST_MODE(musb);
} else {
musb->is_active = 0;
- musb->xceiv.default_a = 0;
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ otg->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
MUSB_DEV_MODE(musb);
}
devctl &= ~MUSB_DEVCTL_SESSION;
conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
- if (musb->set_clock)
- musb->set_clock(musb->clock, 0);
}
prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
@@ -580,8 +567,8 @@ static void tusb_source_power(struct musb *musb, int is_on)
musb_writel(tbase, TUSB_DEV_CONF, conf);
musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
- otg_state_string(musb),
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
+ usb_otg_state_string(musb->xceiv->state),
musb_readb(musb->mregs, MUSB_DEVCTL),
musb_readl(tbase, TUSB_DEV_OTG_STAT),
conf, prcm);
@@ -593,21 +580,12 @@ static void tusb_source_power(struct musb *musb, int is_on)
*
* Note that if a mini-A cable is plugged in the ID line will stay down as
* the weak ID pull-up is not able to pull the ID up.
- *
- * REVISIT: It would be possible to add support for changing between host
- * and peripheral modes in non-OTG configurations by reconfiguring hardware
- * and then setting musb->board_mode. For now, only support OTG mode.
*/
-int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode)
{
void __iomem *tbase = musb->ctrl_base;
u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
- if (musb->board_mode != MUSB_OTG) {
- ERR("Changing mode currently only supported in OTG mode\n");
- return -EINVAL;
- }
-
otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
@@ -615,33 +593,25 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case MUSB_HOST: /* Disable PHY ID detect, ground ID */
phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
dev_conf |= TUSB_DEV_CONF_ID_SEL;
dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
break;
-#endif
-
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */
phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
break;
-#endif
-
-#ifdef CONFIG_USB_MUSB_OTG
case MUSB_OTG: /* Use PHY ID detection */
phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
break;
-#endif
default:
- DBG(2, "Trying to set mode %i\n", musb_mode);
+ dev_dbg(musb->controller, "Trying to set mode %i\n", musb_mode);
return -EINVAL;
}
@@ -665,18 +635,16 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
{
u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
unsigned long idle_timeout = 0;
+ struct usb_otg *otg = musb->xceiv->otg;
/* ID pin */
if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
int default_a;
- if (is_otg_enabled(musb))
- default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
- else
- default_a = is_host_enabled(musb);
- DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
- musb->xceiv.default_a = default_a;
- tusb_source_power(musb, default_a);
+ default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
+ dev_dbg(musb->controller, "Default-%c\n", default_a ? 'A' : 'B');
+ otg->default_a = default_a;
+ tusb_musb_set_vbus(musb, default_a);
/* Don't allow idling immediately */
if (default_a)
@@ -687,9 +655,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
/* B-dev state machine: no vbus ~= disconnect */
- if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
- || !is_host_enabled(musb)) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (!otg->default_a) {
/* ? musb_root_disconnect(musb); */
musb->port1_status &=
~(USB_PORT_STAT_CONNECTION
@@ -698,30 +664,29 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
| USB_PORT_STAT_HIGH_SPEED
| USB_PORT_STAT_TEST
);
-#endif
if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
- DBG(1, "Forcing disconnect (no interrupt)\n");
- if (musb->xceiv.state != OTG_STATE_B_IDLE) {
+ dev_dbg(musb->controller, "Forcing disconnect (no interrupt)\n");
+ if (musb->xceiv->state != OTG_STATE_B_IDLE) {
/* INTR_DISCONNECT can hide... */
- musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
musb->int_usb |= MUSB_INTR_DISCONNECT;
}
musb->is_active = 0;
}
- DBG(2, "vbus change, %s, otg %03x\n",
- otg_state_string(musb), otg_stat);
+ dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
+ usb_otg_state_string(musb->xceiv->state), otg_stat);
idle_timeout = jiffies + (1 * HZ);
schedule_work(&musb->irq_work);
} else /* A-dev state machine */ {
- DBG(2, "vbus change, %s, otg %03x\n",
- otg_state_string(musb), otg_stat);
+ dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
+ usb_otg_state_string(musb->xceiv->state), otg_stat);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_IDLE:
- DBG(2, "Got SRP, turning on VBUS\n");
- musb_set_vbus(musb, 1);
+ dev_dbg(musb->controller, "Got SRP, turning on VBUS\n");
+ musb_platform_set_vbus(musb, 1);
/* CONNECT can wake if a_wait_bcon is set */
if (musb->a_wait_bcon != 0)
@@ -747,11 +712,11 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
*/
if (musb->vbuserr_retry) {
musb->vbuserr_retry--;
- tusb_source_power(musb, 1);
+ tusb_musb_set_vbus(musb, 1);
} else {
musb->vbuserr_retry
= VBUSERR_RETRY_COUNT;
- tusb_source_power(musb, 0);
+ tusb_musb_set_vbus(musb, 0);
}
break;
default:
@@ -764,9 +729,10 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
u8 devctl;
- DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
+ dev_dbg(musb->controller, "%s timer, %03x\n",
+ usb_otg_state_string(musb->xceiv->state), otg_stat);
- switch (musb->xceiv.state) {
+ switch (musb->xceiv->state) {
case OTG_STATE_A_WAIT_VRISE:
/* VBUS has probably been valid for a while now,
* but may well have bounced out of range a bit
@@ -775,17 +741,17 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
if ((devctl & MUSB_DEVCTL_VBUS)
!= MUSB_DEVCTL_VBUS) {
- DBG(2, "devctl %02x\n", devctl);
+ dev_dbg(musb->controller, "devctl %02x\n", devctl);
break;
}
- musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
musb->is_active = 0;
idle_timeout = jiffies
+ msecs_to_jiffies(musb->a_wait_bcon);
} else {
/* REVISIT report overcurrent to hub? */
ERR("vbus too slow, devctl %02x\n", devctl);
- tusb_source_power(musb, 0);
+ tusb_musb_set_vbus(musb, 0);
}
break;
case OTG_STATE_A_WAIT_BCON:
@@ -806,7 +772,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
return idle_timeout;
}
-static irqreturn_t tusb_interrupt(int irq, void *__hci)
+static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
{
struct musb *musb = __hci;
void __iomem *tbase = musb->ctrl_base;
@@ -820,7 +786,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci)
musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
- DBG(3, "TUSB IRQ %08x\n", int_src);
+ dev_dbg(musb->controller, "TUSB IRQ %08x\n", int_src);
musb->int_usb = (u8) int_src;
@@ -829,7 +795,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci)
u32 reg;
u32 i;
- if (tusb_get_revision(musb) == TUSB_REV_30)
+ if (musb->tusb_revision == TUSB_REV_30)
tusb_wbus_quirk(musb, 0);
/* there are issues re-locking the PLL on wakeup ... */
@@ -841,7 +807,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci)
reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
if (reg == i)
break;
- DBG(6, "TUSB NOR not ready\n");
+ dev_dbg(musb->controller, "TUSB NOR not ready\n");
}
/* work around issue 13 (2nd half) */
@@ -853,7 +819,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci)
musb->is_active = 1;
schedule_work(&musb->irq_work);
}
- DBG(3, "wake %sactive %02x\n",
+ dev_dbg(musb->controller, "wake %sactive %02x\n",
musb->is_active ? "" : "in", reg);
/* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
@@ -875,7 +841,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci)
u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);
- DBG(3, "DMA IRQ %08x\n", dma_src);
+ dev_dbg(musb->controller, "DMA IRQ %08x\n", dma_src);
real_dma_src = ~real_dma_src & dma_src;
if (tusb_dma_omap() && real_dma_src) {
int tx_source = (real_dma_src & 0xffff);
@@ -883,7 +849,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci)
for (i = 1; i <= 15; i++) {
if (tx_source & (1 << i)) {
- DBG(3, "completing ep%i %s\n", i, "tx");
+ dev_dbg(musb->controller, "completing ep%i %s\n", i, "tx");
musb_dma_completion(musb, i, 1);
}
}
@@ -910,7 +876,7 @@ static irqreturn_t tusb_interrupt(int irq, void *__hci)
musb_writel(tbase, TUSB_INT_SRC_CLEAR,
int_src & ~TUSB_INT_MASK_RESERVED_BITS);
- musb_platform_try_idle(musb, idle_timeout);
+ tusb_musb_try_idle(musb, idle_timeout);
musb_writel(tbase, TUSB_INT_MASK, int_mask);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -925,7 +891,7 @@ static int dma_off;
* REVISIT:
* - Check what is unnecessary in MGC_HdrcStart()
*/
-void musb_platform_enable(struct musb *musb)
+static void tusb_musb_enable(struct musb *musb)
{
void __iomem *tbase = musb->ctrl_base;
@@ -951,7 +917,7 @@ void musb_platform_enable(struct musb *musb)
musb_writel(tbase, TUSB_INT_CTRL_CONF,
TUSB_INT_CTRL_CONF_INT_RELCYC(0));
- set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
/* maybe force into the Default-A OTG state machine */
if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
@@ -969,7 +935,7 @@ void musb_platform_enable(struct musb *musb)
/*
* Disables TUSB6010. Caller must take care of locking.
*/
-void musb_platform_disable(struct musb *musb)
+static void tusb_musb_disable(struct musb *musb)
{
void __iomem *tbase = musb->ctrl_base;
@@ -994,7 +960,7 @@ void musb_platform_disable(struct musb *musb)
* Sets up TUSB6010 CPU interface specific signals and registers
* Note: Settings optimized for OMAP24xx
*/
-static void __init tusb_setup_cpu_interface(struct musb *musb)
+static void tusb_setup_cpu_interface(struct musb *musb)
{
void __iomem *tbase = musb->ctrl_base;
@@ -1021,7 +987,7 @@ static void __init tusb_setup_cpu_interface(struct musb *musb)
musb_writel(tbase, TUSB_WAIT_COUNT, 1);
}
-static int __init tusb_start(struct musb *musb)
+static int tusb_musb_start(struct musb *musb)
{
void __iomem *tbase = musb->ctrl_base;
int ret = 0;
@@ -1043,10 +1009,11 @@ static int __init tusb_start(struct musb *musb)
goto err;
}
- ret = tusb_print_revision(musb);
- if (ret < 2) {
+ musb->tusb_revision = tusb_get_revision(musb);
+ tusb_print_revision(musb);
+ if (musb->tusb_revision < 2) {
printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
- ret);
+ musb->tusb_revision);
goto err;
}
@@ -1090,13 +1057,17 @@ err:
return -ENODEV;
}
-int __init musb_platform_init(struct musb *musb)
+static int tusb_musb_init(struct musb *musb)
{
struct platform_device *pdev;
struct resource *mem;
- void __iomem *sync;
+ void __iomem *sync = NULL;
int ret;
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv))
+ return -EPROBE_DEFER;
+
pdev = to_platform_device(musb->controller);
/* dma address for async dma */
@@ -1107,14 +1078,16 @@ int __init musb_platform_init(struct musb *musb)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!mem) {
pr_debug("no sync dma resource?\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto done;
}
musb->sync = mem->start;
- sync = ioremap(mem->start, mem->end - mem->start + 1);
+ sync = ioremap(mem->start, resource_size(mem));
if (!sync) {
pr_debug("ioremap for sync failed\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto done;
}
musb->sync_va = sync;
@@ -1123,32 +1096,146 @@ int __init musb_platform_init(struct musb *musb)
*/
musb->mregs += TUSB_BASE_OFFSET;
- ret = tusb_start(musb);
+ ret = tusb_musb_start(musb);
if (ret) {
printk(KERN_ERR "Could not start tusb6010 (%d)\n",
ret);
- return -ENODEV;
+ goto done;
}
- musb->isr = tusb_interrupt;
+ musb->isr = tusb_musb_interrupt;
- if (is_host_enabled(musb))
- musb->board_set_vbus = tusb_source_power;
- if (is_peripheral_enabled(musb))
- musb->xceiv.set_power = tusb_draw_power;
+ musb->xceiv->set_power = tusb_draw_power;
+ the_musb = musb;
setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+done:
+ if (ret < 0) {
+ if (sync)
+ iounmap(sync);
+
+ usb_put_phy(musb->xceiv);
+ }
return ret;
}
-int musb_platform_exit(struct musb *musb)
+static int tusb_musb_exit(struct musb *musb)
{
del_timer_sync(&musb_idle_timer);
+ the_musb = NULL;
if (musb->board_set_power)
musb->board_set_power(0);
iounmap(musb->sync_va);
+ usb_put_phy(musb->xceiv);
return 0;
}
+
+static const struct musb_platform_ops tusb_ops = {
+ .init = tusb_musb_init,
+ .exit = tusb_musb_exit,
+
+ .enable = tusb_musb_enable,
+ .disable = tusb_musb_disable,
+
+ .set_mode = tusb_musb_set_mode,
+ .try_idle = tusb_musb_try_idle,
+
+ .vbus_status = tusb_musb_vbus_status,
+ .set_vbus = tusb_musb_set_vbus,
+};
+
+static const struct platform_device_info tusb_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int tusb_probe(struct platform_device *pdev)
+{
+ struct resource musb_resources[3];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct platform_device *musb;
+ struct tusb6010_glue *glue;
+ struct platform_device_info pinfo;
+ int ret = -ENOMEM;
+
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&pdev->dev, "failed to allocate glue context\n");
+ goto err0;
+ }
+
+ glue->dev = &pdev->dev;
+
+ pdata->platform_ops = &tusb_ops;
+
+ usb_phy_generic_register();
+ platform_set_drvdata(pdev, glue);
+
+ memset(musb_resources, 0x00, sizeof(*musb_resources) *
+ ARRAY_SIZE(musb_resources));
+
+ musb_resources[0].name = pdev->resource[0].name;
+ musb_resources[0].start = pdev->resource[0].start;
+ musb_resources[0].end = pdev->resource[0].end;
+ musb_resources[0].flags = pdev->resource[0].flags;
+
+ musb_resources[1].name = pdev->resource[1].name;
+ musb_resources[1].start = pdev->resource[1].start;
+ musb_resources[1].end = pdev->resource[1].end;
+ musb_resources[1].flags = pdev->resource[1].flags;
+
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
+ pinfo = tusb_dev_info;
+ pinfo.parent = &pdev->dev;
+ pinfo.res = musb_resources;
+ pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb = musb = platform_device_register_full(&pinfo);
+ if (IS_ERR(musb)) {
+ ret = PTR_ERR(musb);
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+ goto err3;
+ }
+
+ return 0;
+
+err3:
+ kfree(glue);
+
+err0:
+ return ret;
+}
+
+static int tusb_remove(struct platform_device *pdev)
+{
+ struct tusb6010_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ usb_phy_generic_unregister(glue->phy);
+ kfree(glue);
+
+ return 0;
+}
+
+static struct platform_driver tusb_driver = {
+ .probe = tusb_probe,
+ .remove = tusb_remove,
+ .driver = {
+ .name = "musb-tusb",
+ },
+};
+
+MODULE_DESCRIPTION("TUSB6010 MUSB Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(tusb_driver);
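
With this series both converted glue layers stop exporting musb_platform_*() symbols and instead describe themselves to the core through a struct musb_platform_ops hung off pdata->platform_ops (omap2430_ops earlier, tusb_ops here). The sketch below illustrates the contract from the core's side; the ops members and the musb_platform_set_vbus() call appear in this diff, but the musb->ops field name and the wrapper bodies are assumptions about the core, not code from this patch.

	/* Hypothetical core-side wrappers, consistent with the ops tables
	 * registered by the glue drivers; musb->ops is assumed to be set
	 * from pdata->platform_ops when the core device probes.
	 */
	static inline int musb_platform_init(struct musb *musb)
	{
		if (!musb->ops->init)
			return -EINVAL;
		return musb->ops->init(musb);
	}

	static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
	{
		if (musb->ops->set_vbus)
			musb->ops->set_vbus(musb, is_on);
	}

This is also why tusb_otg_ints() above now calls musb_platform_set_vbus() instead of the old musb_set_vbus(): the core routes the request back into the glue's .set_vbus, here tusb_musb_set_vbus().
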
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
index ab8c96286ce..aec86c86ce3 100644
--- a/drivers/usb/musb/tusb6010.h
+++ b/drivers/usb/musb/tusb6010.h
@@ -2,7 +2,6 @@
* Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
*
* Copyright (C) 2006 Nokia Corporation
- * Jarkko Nikula <jarkko.nikula@nokia.com>
* Tony Lindgren <tony@atomide.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -13,14 +12,6 @@
#ifndef __TUSB6010_H__
#define __TUSB6010_H__
-extern u8 tusb_get_revision(struct musb *musb);
-
-#ifdef CONFIG_USB_TUSB6010
-#define musb_in_tusb() 1
-#else
-#define musb_in_tusb() 0
-#endif
-
#ifdef CONFIG_USB_TUSB_OMAP_DMA
#define tusb_dma_omap() 1
#else
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 7e073a0d7ac..3ce152c0408 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -11,19 +11,26 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <mach/dma.h>
-#include <mach/mux.h>
+#include <linux/slab.h>
+#include <linux/omap-dma.h>
#include "musb_core.h"
+#include "tusb6010.h"
#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
#define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */
+#define OMAP24XX_DMA_EXT_DMAREQ0 2
+#define OMAP24XX_DMA_EXT_DMAREQ1 3
+#define OMAP242X_DMA_EXT_DMAREQ2 14
+#define OMAP242X_DMA_EXT_DMAREQ3 15
+#define OMAP242X_DMA_EXT_DMAREQ4 16
+#define OMAP242X_DMA_EXT_DMAREQ5 64
+
struct tusb_omap_dma_ch {
struct musb *musb;
void __iomem *tbase;
@@ -38,7 +45,7 @@ struct tusb_omap_dma_ch {
struct tusb_omap_dma *tusb_dma;
- void __iomem *dma_addr;
+ dma_addr_t dma_addr;
u32 len;
u16 packet_sz;
@@ -58,28 +65,6 @@ struct tusb_omap_dma {
unsigned multichannel:1;
};
-static int tusb_omap_dma_start(struct dma_controller *c)
-{
- struct tusb_omap_dma *tusb_dma;
-
- tusb_dma = container_of(c, struct tusb_omap_dma, controller);
-
- /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
-
- return 0;
-}
-
-static int tusb_omap_dma_stop(struct dma_controller *c)
-{
- struct tusb_omap_dma *tusb_dma;
-
- tusb_dma = container_of(c, struct tusb_omap_dma, controller);
-
- /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
-
- return 0;
-}
-
/*
* Allocate dmareq0 to the current channel unless it's already taken
*/
@@ -88,7 +73,7 @@ static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
if (reg != 0) {
- DBG(3, "ep%i dmareq0 is busy for ep%i\n",
+ dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
chdat->epnum, reg & 0xf);
return -EAGAIN;
}
@@ -125,6 +110,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
struct tusb_omap_dma_ch *chdat = to_chdat(channel);
struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
struct musb *musb = chdat->musb;
+ struct device *dev = musb->controller;
struct musb_hw_ep *hw_ep = chdat->hw_ep;
void __iomem *ep_conf = hw_ep->conf;
void __iomem *mbase = musb->mregs;
@@ -141,7 +127,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
if (ch_status != OMAP_DMA_BLOCK_IRQ)
printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
- DBG(3, "ep%i %s dma callback ch: %i status: %x\n",
+ dev_dbg(musb->controller, "ep%i %s dma callback ch: %i status: %x\n",
chdat->epnum, chdat->tx ? "tx" : "rx",
ch, ch_status);
@@ -154,7 +140,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
if (unlikely(remaining > chdat->transfer_len)) {
- DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
+ dev_dbg(musb->controller, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
chdat->tx ? "tx" : "rx", chdat->ch,
remaining);
remaining = 0;
@@ -163,22 +149,24 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
channel->actual_len = chdat->transfer_len - remaining;
pio = chdat->len - channel->actual_len;
- DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);
+ dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);
/* Transfer remaining 1 - 31 bytes */
if (pio > 0 && pio < 32) {
u8 *buf;
- DBG(3, "Using PIO for remaining %lu bytes\n", pio);
+ dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio);
buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
if (chdat->tx) {
- dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
- chdat->transfer_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, chdat->dma_addr,
+ chdat->transfer_len,
+ DMA_TO_DEVICE);
musb_write_fifo(hw_ep, pio, buf);
} else {
+ dma_unmap_single(dev, chdat->dma_addr,
+ chdat->transfer_len,
+ DMA_FROM_DEVICE);
musb_read_fifo(hw_ep, pio, buf);
- dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
- chdat->transfer_len, DMA_FROM_DEVICE);
}
channel->actual_len += pio;
}
@@ -205,7 +193,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
u16 csr;
if (chdat->tx) {
- DBG(3, "terminating short tx packet\n");
+ dev_dbg(musb->controller, "terminating short tx packet\n");
musb_ep_select(mbase, chdat->epnum);
csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
@@ -223,6 +211,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
struct tusb_omap_dma_ch *chdat = to_chdat(channel);
struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
struct musb *musb = chdat->musb;
+ struct device *dev = musb->controller;
struct musb_hw_ep *hw_ep = chdat->hw_ep;
void __iomem *mbase = musb->mregs;
void __iomem *ep_conf = hw_ep->conf;
@@ -259,7 +248,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
if (dma_remaining) {
- DBG(2, "Busy %s dma ch%i, not using: %08x\n",
+ dev_dbg(musb->controller, "Busy %s dma ch%i, not using: %08x\n",
chdat->tx ? "tx" : "rx", chdat->ch,
dma_remaining);
return false;
@@ -278,7 +267,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
sync_dev = chdat->sync_dev;
} else {
if (tusb_omap_use_shared_dmareq(chdat) != 0) {
- DBG(3, "could not get dma for ep%i\n", chdat->epnum);
+ dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
return false;
}
if (tusb_dma->ch < 0) {
@@ -298,14 +287,16 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
chdat->packet_sz = packet_sz;
chdat->len = len;
channel->actual_len = 0;
- chdat->dma_addr = (void __iomem *)dma_addr;
+ chdat->dma_addr = dma_addr;
channel->status = MUSB_DMA_STATUS_BUSY;
/* Since we're recycling dma areas, we need to clean or invalidate */
if (chdat->tx)
- dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE);
+ dma_map_single(dev, phys_to_virt(dma_addr), len,
+ DMA_TO_DEVICE);
else
- dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE);
+ dma_map_single(dev, phys_to_virt(dma_addr), len,
+ DMA_FROM_DEVICE);
/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
if ((dma_addr & 0x3) == 0) {
@@ -319,7 +310,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */
- DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
+ dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
chdat->epnum, chdat->tx ? "tx" : "rx",
ch, dma_addr, chdat->transfer_len, len,
chdat->transfer_packet_sz, packet_sz);
@@ -363,7 +354,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */
}
- DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
+ dev_dbg(musb->controller, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
chdat->epnum, chdat->tx ? "tx" : "rx",
(dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
((dma_addr & 0x3) == 0) ? "sync" : "async",
@@ -518,7 +509,7 @@ tusb_omap_dma_allocate(struct dma_controller *c,
/* REVISIT: Why does dmareq5 not work? */
if (hw_ep->epnum == 0) {
- DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
+ dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
return NULL;
}
@@ -578,7 +569,7 @@ tusb_omap_dma_allocate(struct dma_controller *c,
chdat->ch = -1;
}
- DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
+ dev_dbg(musb->controller, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
chdat->epnum,
chdat->tx ? "tx" : "rx",
chdat->ch >= 0 ? "dedicated" : "shared",
@@ -591,7 +582,7 @@ tusb_omap_dma_allocate(struct dma_controller *c,
free_dmareq:
tusb_omap_dma_free_dmareq(chdat);
- DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum);
+ dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum);
channel->status = MUSB_DMA_STATUS_UNKNOWN;
return NULL;
@@ -604,7 +595,7 @@ static void tusb_omap_dma_release(struct dma_channel *channel)
void __iomem *tbase = musb->ctrl_base;
u32 reg;
- DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch);
+ dev_dbg(musb->controller, "ep%i ch%i\n", chdat->epnum, chdat->ch);
reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
if (chdat->tx)
@@ -648,14 +639,13 @@ void dma_controller_destroy(struct dma_controller *c)
}
}
- if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0)
+ if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
omap_free_dma(tusb_dma->ch);
kfree(tusb_dma);
}
-struct dma_controller *__init
-dma_controller_create(struct musb *musb, void __iomem *base)
+struct dma_controller *dma_controller_create(struct musb *musb, void __iomem *base)
{
void __iomem *tbase = musb->ctrl_base;
struct tusb_omap_dma *tusb_dma;
@@ -673,7 +663,7 @@ dma_controller_create(struct musb *musb, void __iomem *base)
tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
if (!tusb_dma)
- goto cleanup;
+ goto out;
tusb_dma->musb = musb;
tusb_dma->tbase = musb->ctrl_base;
@@ -682,14 +672,12 @@ dma_controller_create(struct musb *musb, void __iomem *base)
tusb_dma->dmareq = -1;
tusb_dma->sync_dev = -1;
- tusb_dma->controller.start = tusb_omap_dma_start;
- tusb_dma->controller.stop = tusb_omap_dma_stop;
tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
tusb_dma->controller.channel_release = tusb_omap_dma_release;
tusb_dma->controller.channel_program = tusb_omap_dma_program;
tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
- if (tusb_get_revision(musb) >= TUSB_REV_30)
+ if (musb->tusb_revision >= TUSB_REV_30)
tusb_dma->multichannel = 1;
for (i = 0; i < MAX_DMAREQ; i++) {
@@ -714,6 +702,6 @@ dma_controller_create(struct musb *musb, void __iomem *base)
cleanup:
dma_controller_destroy(&tusb_dma->controller);
-
+out:
return NULL;
}
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
new file mode 100644
index 00000000000..f202e508846
--- /dev/null
+++ b/drivers/usb/musb/ux500.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2010 ST-Ericsson AB
+ * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ *
+ * Based on omap2430.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/usb/musb-ux500.h>
+
+#include "musb_core.h"
+
+static struct musb_hdrc_config ux500_musb_hdrc_config = {
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = 16,
+ .ram_bits = 16,
+};
+
+struct ux500_glue {
+ struct device *dev;
+ struct platform_device *musb;
+ struct clk *clk;
+};
+#define glue_to_musb(g) platform_get_drvdata(g->musb)
+
+static void ux500_musb_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ if (musb->xceiv->state == OTG_STATE_A_IDLE) {
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ /*
+ * Wait for the musb controller to switch to A-device mode
+ * so that VBUS can be enabled
+ */
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(musb->controller,
+ "configured as A device timeout");
+ break;
+ }
+ }
+
+ } else {
+ musb->is_active = 1;
+ musb->xceiv->otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+ MUSB_HST_MODE(musb);
+ }
+ } else {
+ musb->is_active = 0;
+
+ /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and jumping
+ * right to B_IDLE...
+ */
+ musb->xceiv->otg->default_a = 0;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ MUSB_DEV_MODE(musb);
+ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ /*
+ * Devctl values will be updated after vbus goes below
+ * session_valid. The time taken depends on the capacitance
+	 * on the VBUS line. The max discharge time can be up to 1 sec
+	 * as per the spec. Typically on our platform, it is 200 ms.
+ */
+ if (!is_on)
+ mdelay(200);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
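+/*
+ * PHY event notifier: an ID-ground event starts a host session, while a
+ * VBUS-removal event either drops the session (host) or returns the OTG
+ * state machine to B_IDLE.
+ */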
+static int musb_otg_notifications(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ struct musb *musb = container_of(nb, struct musb, nb);
+
+ dev_dbg(musb->controller, "musb_otg_notifications %ld %s\n",
+ event, usb_otg_state_string(musb->xceiv->state));
+
+ switch (event) {
+ case UX500_MUSB_ID:
+ dev_dbg(musb->controller, "ID GND\n");
+ ux500_musb_set_vbus(musb, 1);
+ break;
+ case UX500_MUSB_VBUS:
+ dev_dbg(musb->controller, "VBUS Connect\n");
+ break;
+ case UX500_MUSB_NONE:
+ dev_dbg(musb->controller, "VBUS Disconnect\n");
+ if (is_host_active(musb))
+ ux500_musb_set_vbus(musb, 0);
+ else
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ break;
+ default:
+ dev_dbg(musb->controller, "ID float\n");
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
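+/*
+ * Top-level interrupt handler: latch the pending USB and endpoint
+ * interrupts under the musb lock and hand them to musb_interrupt().
+ */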
+static irqreturn_t ux500_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
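+/*
+ * Platform init: look up the USB2 transceiver (deferring probe if it is
+ * not available yet) and register for its OTG notifications before
+ * installing the interrupt handler.
+ */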
+static int ux500_musb_init(struct musb *musb)
+{
+ int status;
+
+ musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(musb->xceiv)) {
+ pr_err("HS USB OTG: no transceiver configured\n");
+ return -EPROBE_DEFER;
+ }
+
+ musb->nb.notifier_call = musb_otg_notifications;
+ status = usb_register_notifier(musb->xceiv, &musb->nb);
+ if (status < 0) {
+ dev_dbg(musb->controller, "notification register failed\n");
+ return status;
+ }
+
+ musb->isr = ux500_musb_interrupt;
+
+ return 0;
+}
+
+static int ux500_musb_exit(struct musb *musb)
+{
+ usb_unregister_notifier(musb->xceiv, &musb->nb);
+
+ usb_put_phy(musb->xceiv);
+
+ return 0;
+}
+
+static const struct musb_platform_ops ux500_ops = {
+ .init = ux500_musb_init,
+ .exit = ux500_musb_exit,
+
+ .set_vbus = ux500_musb_set_vbus,
+};
+
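+/*
+ * Build platform data from the device tree; only the "dr_mode" property
+ * (host, otg or peripheral) is translated here.
+ */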
+static struct musb_hdrc_platform_data *
+ux500_of_probe(struct platform_device *pdev, struct device_node *np)
+{
+ struct musb_hdrc_platform_data *pdata;
+ const char *mode;
+ int strlen;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ mode = of_get_property(np, "dr_mode", &strlen);
+ if (!mode) {
+ dev_err(&pdev->dev, "No 'dr_mode' property found\n");
+ return NULL;
+ }
+
+ if (strlen > 0) {
+ if (!strcmp(mode, "host"))
+ pdata->mode = MUSB_HOST;
+ if (!strcmp(mode, "otg"))
+ pdata->mode = MUSB_OTG;
+ if (!strcmp(mode, "peripheral"))
+ pdata->mode = MUSB_PERIPHERAL;
+ }
+
+ return pdata;
+}
+
+static int ux500_probe(struct platform_device *pdev)
+{
+ struct resource musb_resources[2];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct platform_device *musb;
+ struct ux500_glue *glue;
+ struct clk *clk;
+ int ret = -ENOMEM;
+
+ if (!pdata) {
+ if (np) {
+ pdata = ux500_of_probe(pdev, np);
+ if (!pdata)
+ goto err0;
+
+ pdev->dev.platform_data = pdata;
+ } else {
+ dev_err(&pdev->dev, "no pdata or device tree found\n");
+ goto err0;
+ }
+ }
+
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&pdev->dev, "failed to allocate glue context\n");
+ goto err0;
+ }
+
+ musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
+ if (!musb) {
+ dev_err(&pdev->dev, "failed to allocate musb device\n");
+ goto err1;
+ }
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(clk);
+ goto err3;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto err4;
+ }
+
+ musb->dev.parent = &pdev->dev;
+ musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+
+ glue->dev = &pdev->dev;
+ glue->musb = musb;
+ glue->clk = clk;
+
+ pdata->platform_ops = &ux500_ops;
+ pdata->config = &ux500_musb_hdrc_config;
+
+ platform_set_drvdata(pdev, glue);
+
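+	/*
+	 * Hand the glue device's first two resources (normally the MUSB
+	 * register window and its interrupt) down to the musb-hdrc child.
+	 */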
+ memset(musb_resources, 0x00, sizeof(*musb_resources) *
+ ARRAY_SIZE(musb_resources));
+
+ musb_resources[0].name = pdev->resource[0].name;
+ musb_resources[0].start = pdev->resource[0].start;
+ musb_resources[0].end = pdev->resource[0].end;
+ musb_resources[0].flags = pdev->resource[0].flags;
+
+ musb_resources[1].name = pdev->resource[1].name;
+ musb_resources[1].start = pdev->resource[1].start;
+ musb_resources[1].end = pdev->resource[1].end;
+ musb_resources[1].flags = pdev->resource[1].flags;
+
+ ret = platform_device_add_resources(musb, musb_resources,
+ ARRAY_SIZE(musb_resources));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add resources\n");
+ goto err5;
+ }
+
+ ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add platform_data\n");
+ goto err5;
+ }
+
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device\n");
+ goto err5;
+ }
+
+ return 0;
+
+err5:
+ clk_disable_unprepare(clk);
+
+err4:
+ clk_put(clk);
+
+err3:
+ platform_device_put(musb);
+
+err1:
+ kfree(glue);
+
+err0:
+ return ret;
+}
+
+static int ux500_remove(struct platform_device *pdev)
+{
+ struct ux500_glue *glue = platform_get_drvdata(pdev);
+
+ platform_device_unregister(glue->musb);
+ clk_disable_unprepare(glue->clk);
+ clk_put(glue->clk);
+ kfree(glue);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ux500_suspend(struct device *dev)
+{
+ struct ux500_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+
+ usb_phy_set_suspend(musb->xceiv, 1);
+ clk_disable_unprepare(glue->clk);
+
+ return 0;
+}
+
+static int ux500_resume(struct device *dev)
+{
+ struct ux500_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue_to_musb(glue);
+ int ret;
+
+ ret = clk_prepare_enable(glue->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ usb_phy_set_suspend(musb->xceiv, 0);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ux500_pm_ops, ux500_suspend, ux500_resume);
+
+static const struct of_device_id ux500_match[] = {
+ { .compatible = "stericsson,db8500-musb", },
+ {}
+};
+
+static struct platform_driver ux500_driver = {
+ .probe = ux500_probe,
+ .remove = ux500_remove,
+ .driver = {
+ .name = "musb-ux500",
+ .pm = &ux500_pm_ops,
+ .of_match_table = ux500_match,
+ },
+};
+
+MODULE_DESCRIPTION("UX500 MUSB Glue Layer");
+MODULE_AUTHOR("Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>");
+MODULE_LICENSE("GPL v2");
+module_platform_driver(ux500_driver);
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
new file mode 100644
index 00000000000..9aad00f11bd
--- /dev/null
+++ b/drivers/usb/musb/ux500_dma.c
@@ -0,0 +1,412 @@
+/*
+ * drivers/usb/musb/ux500_dma.c
+ *
+ * U8500 DMA support code
+ *
+ * Copyright (C) 2009 STMicroelectronics
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Authors:
+ * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * Praveena Nadahally <praveen.nadahally@stericsson.com>
+ * Rajaram Regupathy <ragupathy.rajaram@stericsson.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pfn.h>
+#include <linux/sizes.h>
+#include <linux/platform_data/usb-musb-ux500.h>
+#include "musb_core.h"
+
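+/*
+ * DMA request line names: each of the 8 lines is shared by an endpoint
+ * pair (for example "iep_1_9" serves IN endpoints 1 and 9), matching the
+ * mapping used in ux500_dma_channel_allocate().
+ */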
+static const char *iep_chan_names[] = { "iep_1_9", "iep_2_10", "iep_3_11", "iep_4_12",
+ "iep_5_13", "iep_6_14", "iep_7_15", "iep_8" };
+static const char *oep_chan_names[] = { "oep_1_9", "oep_2_10", "oep_3_11", "oep_4_12",
+ "oep_5_13", "oep_6_14", "oep_7_15", "oep_8" };
+
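+/*
+ * Per-channel state: the generic dma_channel plus the dmaengine channel
+ * backing it and the hw_ep it is currently bound to.
+ */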
+struct ux500_dma_channel {
+ struct dma_channel channel;
+ struct ux500_dma_controller *controller;
+ struct musb_hw_ep *hw_ep;
+ struct dma_chan *dma_chan;
+ unsigned int cur_len;
+ dma_cookie_t cookie;
+ u8 ch_num;
+ u8 is_tx;
+ u8 is_allocated;
+};
+
+struct ux500_dma_controller {
+ struct dma_controller controller;
+ struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
+ struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
+ void *private_data;
+ dma_addr_t phy_base;
+};
+
+/* dmaengine completion callback, used for both rx and tx transfers. */
+static void ux500_dma_callback(void *private_data)
+{
+ struct dma_channel *channel = private_data;
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ unsigned long flags;
+
+	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d\n",
+ hw_ep->epnum);
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ux500_channel->channel.actual_len = ux500_channel->cur_len;
+ ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
+ musb_dma_completion(musb, hw_ep->epnum, ux500_channel->is_tx);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+}
+
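+/*
+ * Program one transfer: wrap the buffer in a single-entry scatterlist,
+ * point the slave configuration at the endpoint FIFO address and submit
+ * the descriptor to the dmaengine channel.
+ */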
+static bool ux500_configure_channel(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
+ struct dma_chan *dma_chan = ux500_channel->dma_chan;
+ struct dma_async_tx_descriptor *dma_desc;
+ enum dma_transfer_direction direction;
+ struct scatterlist sg;
+ struct dma_slave_config slave_conf;
+ enum dma_slave_buswidth addr_width;
+ dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +
+ ux500_channel->controller->phy_base);
+ struct musb *musb = ux500_channel->controller->private_data;
+
+ dev_dbg(musb->controller,
+		"packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
+ packet_sz, mode, (unsigned long long) dma_addr,
+ len, ux500_channel->is_tx);
+
+ ux500_channel->cur_len = len;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_addr)), len,
+ offset_in_page(dma_addr));
+ sg_dma_address(&sg) = dma_addr;
+ sg_dma_len(&sg) = len;
+
+ direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ slave_conf.direction = direction;
+ slave_conf.src_addr = usb_fifo_addr;
+ slave_conf.src_addr_width = addr_width;
+ slave_conf.src_maxburst = 16;
+ slave_conf.dst_addr = usb_fifo_addr;
+ slave_conf.dst_addr_width = addr_width;
+ slave_conf.dst_maxburst = 16;
+ slave_conf.device_fc = false;
+
+ dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
+ (unsigned long) &slave_conf);
+
+ dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma_desc)
+ return false;
+
+ dma_desc->callback = ux500_dma_callback;
+ dma_desc->callback_param = channel;
+ ux500_channel->cookie = dma_desc->tx_submit(dma_desc);
+
+ dma_async_issue_pending(dma_chan);
+
+ return true;
+}
+
+static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep, u8 is_tx)
+{
+ struct ux500_dma_controller *controller = container_of(c,
+ struct ux500_dma_controller, controller);
+ struct ux500_dma_channel *ux500_channel = NULL;
+ struct musb *musb = controller->private_data;
+ u8 ch_num = hw_ep->epnum - 1;
+
+	/* 8 DMA channels (0 - 7). Each DMA channel can only be allocated
+	 * to a specific pair of hw_eps. For example, DMA channel 0 can only
+	 * be allocated to hw_ep 1 or 9.
+ */
+ if (ch_num > 7)
+ ch_num -= 8;
+
+ if (ch_num >= UX500_MUSB_DMA_NUM_RX_TX_CHANNELS)
+ return NULL;
+
+ ux500_channel = is_tx ? &(controller->tx_channel[ch_num]) :
+			&(controller->rx_channel[ch_num]);
+
+ /* Check if channel is already used. */
+ if (ux500_channel->is_allocated)
+ return NULL;
+
+ ux500_channel->hw_ep = hw_ep;
+ ux500_channel->is_allocated = 1;
+
+ dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
+ hw_ep->epnum, is_tx, ch_num);
+
+ return &(ux500_channel->channel);
+}
+
+static void ux500_dma_channel_release(struct dma_channel *channel)
+{
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb *musb = ux500_channel->controller->private_data;
+
+ dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num);
+
+ if (ux500_channel->is_allocated) {
+ ux500_channel->is_allocated = 0;
+ channel->status = MUSB_DMA_STATUS_FREE;
+ channel->actual_len = 0;
+ }
+}
+
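+/*
+ * DMA is only attempted for word-aligned buffers of at least 512 bytes
+ * with a word-aligned length and maxpacket; everything else is left to
+ * PIO.
+ */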
+static int ux500_dma_is_compatible(struct dma_channel *channel,
+ u16 maxpacket, void *buf, u32 length)
+{
+ if ((maxpacket & 0x3) ||
+ ((unsigned long int) buf & 0x3) ||
+ (length < 512) ||
+ (length & 0x3))
+ return false;
+ else
+ return true;
+}
+
+static int ux500_dma_channel_program(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ int ret;
+
+ BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ channel->status == MUSB_DMA_STATUS_BUSY);
+
+ if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
+ return false;
+
+ channel->status = MUSB_DMA_STATUS_BUSY;
+ channel->actual_len = 0;
+ ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len);
+ if (!ret)
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ return ret;
+}
+
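+/*
+ * Abort a busy transfer: clear the DMA enable bits in the endpoint CSR
+ * first, then terminate the underlying dmaengine channel.
+ */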
+static int ux500_dma_channel_abort(struct dma_channel *channel)
+{
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct ux500_dma_controller *controller = ux500_channel->controller;
+ struct musb *musb = controller->private_data;
+ void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
+ u16 csr;
+
+ dev_dbg(musb->controller, "channel=%d, is_tx=%d\n",
+ ux500_channel->ch_num, ux500_channel->is_tx);
+
+ if (channel->status == MUSB_DMA_STATUS_BUSY) {
+ if (ux500_channel->is_tx) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET |
+ MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_DMAMODE);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR |
+ MUSB_RXCSR_DMAENAB |
+ MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ ux500_channel->dma_chan->device->
+ device_control(ux500_channel->dma_chan,
+ DMA_TERMINATE_ALL, 0);
+ channel->status = MUSB_DMA_STATUS_FREE;
+ }
+ return 0;
+}
+
+static void ux500_dma_controller_stop(struct ux500_dma_controller *controller)
+{
+ struct ux500_dma_channel *ux500_channel;
+ struct dma_channel *channel;
+ u8 ch_num;
+
+ for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
+ channel = &controller->rx_channel[ch_num].channel;
+ ux500_channel = channel->private_data;
+
+ ux500_dma_channel_release(channel);
+
+ if (ux500_channel->dma_chan)
+ dma_release_channel(ux500_channel->dma_chan);
+ }
+
+ for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
+ channel = &controller->tx_channel[ch_num].channel;
+ ux500_channel = channel->private_data;
+
+ ux500_dma_channel_release(channel);
+
+ if (ux500_channel->dma_chan)
+ dma_release_channel(ux500_channel->dma_chan);
+ }
+}
+
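+/*
+ * Request the RX channels first and then the TX channels, preferring the
+ * named DT channels and falling back to the board-data filter; on failure
+ * everything already allocated is released again.
+ */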
+static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
+{
+ struct ux500_dma_channel *ux500_channel = NULL;
+ struct musb *musb = controller->private_data;
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
+ struct ux500_musb_board_data *data;
+ struct dma_channel *dma_channel = NULL;
+ char **chan_names;
+ u32 ch_num;
+ u8 dir;
+ u8 is_tx = 0;
+
+ void **param_array;
+ struct ux500_dma_channel *channel_array;
+ dma_cap_mask_t mask;
+
+ if (!plat) {
+ dev_err(musb->controller, "No platform data\n");
+ return -EINVAL;
+ }
+
+ data = plat->board_data;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Prepare the loop for RX channels */
+ channel_array = controller->rx_channel;
+ param_array = data ? data->dma_rx_param_array : NULL;
+ chan_names = (char **)iep_chan_names;
+
+ for (dir = 0; dir < 2; dir++) {
+ for (ch_num = 0;
+ ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS;
+ ch_num++) {
+ ux500_channel = &channel_array[ch_num];
+ ux500_channel->controller = controller;
+ ux500_channel->ch_num = ch_num;
+ ux500_channel->is_tx = is_tx;
+
+ dma_channel = &(ux500_channel->channel);
+ dma_channel->private_data = ux500_channel;
+ dma_channel->status = MUSB_DMA_STATUS_FREE;
+ dma_channel->max_len = SZ_16M;
+
+ ux500_channel->dma_chan =
+ dma_request_slave_channel(dev, chan_names[ch_num]);
+
+ if (!ux500_channel->dma_chan)
+ ux500_channel->dma_chan =
+ dma_request_channel(mask,
+ data ?
+ data->dma_filter :
+ NULL,
+ param_array ?
+ param_array[ch_num] :
+ NULL);
+
+ if (!ux500_channel->dma_chan) {
+ ERR("Dma pipe allocation error dir=%d ch=%d\n",
+ dir, ch_num);
+
+ /* Release already allocated channels */
+ ux500_dma_controller_stop(controller);
+
+ return -EBUSY;
+ }
+
+ }
+
+ /* Prepare the loop for TX channels */
+ channel_array = controller->tx_channel;
+ param_array = data ? data->dma_tx_param_array : NULL;
+ chan_names = (char **)oep_chan_names;
+ is_tx = 1;
+ }
+
+ return 0;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct ux500_dma_controller *controller = container_of(c,
+ struct ux500_dma_controller, controller);
+
+ ux500_dma_controller_stop(controller);
+ kfree(controller);
+}
+
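+/*
+ * The physical base of the MUSB register window is saved as phy_base so
+ * that per-endpoint FIFO bus addresses can be computed in
+ * ux500_configure_channel().
+ */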
+struct dma_controller *dma_controller_create(struct musb *musb,
+ void __iomem *base)
+{
+ struct ux500_dma_controller *controller;
+ struct platform_device *pdev = to_platform_device(musb->controller);
+ struct resource *iomem;
+ int ret;
+
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ goto kzalloc_fail;
+
+ controller->private_data = musb;
+
+ /* Save physical address for DMA controller. */
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ dev_err(musb->controller, "no memory resource defined\n");
+ goto plat_get_fail;
+ }
+
+ controller->phy_base = (dma_addr_t) iomem->start;
+
+ controller->controller.channel_alloc = ux500_dma_channel_allocate;
+ controller->controller.channel_release = ux500_dma_channel_release;
+ controller->controller.channel_program = ux500_dma_channel_program;
+ controller->controller.channel_abort = ux500_dma_channel_abort;
+ controller->controller.is_compatible = ux500_dma_is_compatible;
+
+ ret = ux500_dma_controller_start(controller);
+ if (ret)
+ goto plat_get_fail;
+ return &controller->controller;
+
+plat_get_fail:
+ kfree(controller);
+kzalloc_fail:
+ return NULL;
+}