aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Barksdale <amatus@amatus.name>2014-08-13 18:32:56 -0500
committerDavid Barksdale <amatus@amatus.name>2014-08-13 18:32:56 -0500
commitcddfc3baae08e24185c7716434452ebbd404cd39 (patch)
tree01e1f933fd8724bf2ad34db47730c98feefc0a7c
parent3918522a523adc3f001cf9c5da327d8fb383c26e (diff)
-rwxr-xr-xarch/powerpc/boot/dts/apollo3g.dts24
-rw-r--r--arch/powerpc/configs/44x/apollo_3G_nas_defconfig129
-rw-r--r--arch/powerpc/platforms/44x/Makefile1
-rw-r--r--arch/powerpc/platforms/44x/apollo3g-usb.c110
-rwxr-xr-xdrivers/ata/sata_dwc_pmp.c2
-rw-r--r--drivers/leds/led-class-3g.c445
-rw-r--r--drivers/leds/leds-apollo3g.c51
-rw-r--r--drivers/net/ibm_newemac/core.c263
-rw-r--r--drivers/net/ibm_newemac/emac.h1
-rw-r--r--drivers/net/ibm_newemac/tah.c79
-rw-r--r--drivers/net/ibm_newemac/tah.h22
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/usb/core/message.c14
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_cil.c14
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_cil.c.org3237
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c138
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c.org2900
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h31
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h.org660
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c48
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c.org1746
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c233
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c.org696
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c13
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c.org1408
-rw-r--r--drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h4
-rw-r--r--drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h.org304
-rw-r--r--drivers/usb/gadget/dwc_otg/ppc4xx_dma.h2
-rw-r--r--drivers/usb/gadget/dwc_otg/ppc4xx_dma.h.org620
-rw-r--r--drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd.c.sdiff2944
-rw-r--r--drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd.h.sdiff675
-rw-r--r--drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd_intr.c.sdiff1763
-rw-r--r--drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd_queue.c.sdiff821
-rw-r--r--fs/splice.c6
-rwxr-xr-ximages/apollo3g.dtbbin0 -> 8373 bytes
-rwxr-xr-ximages/uImagebin0 -> 3466961 bytes
-rw-r--r--net/ipv4/tcp.c3
38 files changed, 19047 insertions, 370 deletions
diff --git a/arch/powerpc/boot/dts/apollo3g.dts b/arch/powerpc/boot/dts/apollo3g.dts
index ffe03c281e6..2bd481d55a6 100755
--- a/arch/powerpc/boot/dts/apollo3g.dts
+++ b/arch/powerpc/boot/dts/apollo3g.dts
@@ -179,6 +179,30 @@
/*RX1 COAL 0xd 0x2*/>;
};
+ USBOTG0: usbotg@bff80000 {
+ compatible = "amcc,usb-otg-405ex";
+ reg = <4 0xbff80000 0x10000>;
+ interrupt-parent = <&USBOTG0>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = </* USB-OTG */ 0 &UIC2 0x1c 4
+ /* HIGH-POWER */ 1 &UIC1 0x1a 8
+ /* DMA */ 2 &UIC0 0xc 4>;
+ };
+
+
+ /* SATA DWC devices */
+ SATA0: sata@bffd1000 {
+ compatible = "amcc,sata-460ex";
+ reg = <4 0xbffd1000 0x800 /* SATA0 */
+ 4 0xbffd0800 0x400>; /* AHBDMA */
+ dma-channel=<0>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <26 4 /* SATA0 */
+ 25 4>; /* AHBDMA */
+ };
SATA1: sata@bffd1800 {
compatible = "amcc,sata-460ex";
reg = <4 0xbffd1800 0x800 /* SATA1 */
diff --git a/arch/powerpc/configs/44x/apollo_3G_nas_defconfig b/arch/powerpc/configs/44x/apollo_3G_nas_defconfig
index d3308fff26e..6690df57a2c 100644
--- a/arch/powerpc/configs/44x/apollo_3G_nas_defconfig
+++ b/arch/powerpc/configs/44x/apollo_3G_nas_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.32.11
-# Fri Aug 6 10:01:35 2010
+# Thu Sep 15 14:29:56 2011
#
# CONFIG_PPC64 is not set
@@ -97,8 +97,7 @@ CONFIG_RCU_FANOUT=32
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
@@ -163,10 +162,10 @@ CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
-# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
-# CONFIG_MODVERSIONS is not set
+CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
CONFIG_LBDAF=y
@@ -206,6 +205,7 @@ CONFIG_DCU_ENABLE=y
# CONFIG_ARCHES is not set
# CONFIG_BEECH is not set
CONFIG_APOLLO3G=y
+# CONFIG_BLUESTONE is not set
# CONFIG_CANYONLANDS is not set
# CONFIG_ACER is not set
# CONFIG_GLACIER is not set
@@ -812,7 +812,7 @@ CONFIG_SATA_DWC_PMP=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_AUTODETECT=y
-# CONFIG_MD_LINEAR is not set
+CONFIG_MD_LINEAR=y
CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_MD_RAID10=y
@@ -1263,7 +1263,7 @@ CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
-CONFIG_USB=y
+CONFIG_USB=m
# CONFIG_USB_DEBUG is not set
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
@@ -1286,7 +1286,10 @@ CONFIG_USB_DEVICE_CLASS=y
#
# CONFIG_USB_C67X00_HCD is not set
# CONFIG_USB_XHCI_HCD is not set
-# CONFIG_USB_EHCI_HCD is not set
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_HCD_PPC_OF=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
@@ -1313,7 +1316,7 @@ CONFIG_USB_DEVICE_CLASS=y
#
# also be needed; see USB_STORAGE Help for more info
#
-CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
@@ -1356,13 +1359,14 @@ CONFIG_USB_LIBUSUAL=y
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_FTDI_ELAN is not set
# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
-CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET=m
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
# CONFIG_USB_GADGET_DEBUG_FS is not set
@@ -1377,7 +1381,7 @@ CONFIG_USB_PERIPHERAL_CONTROLLER=y
# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LANGWELL is not set
CONFIG_USB_GADGET_DWC_OTG=y
-CONFIG_USB_DWC_OTG=y
+CONFIG_USB_DWC_OTG=m
CONFIG_DWC_OTG_MODE=y
# CONFIG_DWC_HOST_ONLY is not set
# CONFIG_DWC_DEVICE_ONLY is not set
@@ -1389,8 +1393,7 @@ CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_AUDIO is not set
# CONFIG_USB_ETH is not set
# CONFIG_USB_GADGETFS is not set
-CONFIG_USB_FILE_STORAGE=m
-# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_FILE_STORAGE is not set
# CONFIG_USB_G_SERIAL is not set
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
@@ -1548,7 +1551,8 @@ CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
#
# Caches
@@ -1564,9 +1568,14 @@ CONFIG_INOTIFY_USER=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=y
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
#
# Pseudo filesystems
@@ -1583,8 +1592,8 @@ CONFIG_TMPFS=y
CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
+CONFIG_HFS_FS=y
+CONFIG_HFSPLUS_FS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
@@ -1653,13 +1662,14 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_OSF_PARTITION is not set
# CONFIG_AMIGA_PARTITION is not set
# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
+CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
# CONFIG_BSD_DISKLABEL is not set
# CONFIG_MINIX_SUBPARTITION is not set
# CONFIG_SOLARIS_X86_PARTITION is not set
# CONFIG_UNIXWARE_DISKLABEL is not set
-# CONFIG_LDM_PARTITION is not set
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
@@ -1668,44 +1678,44 @@ CONFIG_EFI_PARTITION=y
# CONFIG_SYSV68_PARTITION is not set
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
# CONFIG_DLM is not set
# CONFIG_BINARY_PRINTF is not set
@@ -1834,7 +1844,6 @@ CONFIG_SECURITYFS=y
# CONFIG_SECURITY_NETWORK is not set
# CONFIG_SECURITY_PATH is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-# CONFIG_SECURITY_ROOTPLUG is not set
CONFIG_SECURITY_TRUSTEES=y
# CONFIG_SECURITY_TRUSTEES_DEBUG is not set
# CONFIG_SECURITY_TOMOYO is not set
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 084f9b0561a..416e85f1d40 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_WARP) += warp.o
obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
obj-$(CONFIG_BEECH) += beech-usb.o
+obj-$(CONFIG_APOLLO3G) += apollo3g-usb.o
obj-$(CONFIG_APOLLO3G) += apollo3g-gpio.o
obj-$(CONFIG_BLUESTONE) += bluestone-usb.o
diff --git a/arch/powerpc/platforms/44x/apollo3g-usb.c b/arch/powerpc/platforms/44x/apollo3g-usb.c
new file mode 100644
index 00000000000..f3fa2613737
--- /dev/null
+++ b/arch/powerpc/platforms/44x/apollo3g-usb.c
@@ -0,0 +1,110 @@
+/*
+ * AMCC Kilauea USB-OTG wrapper
+ *
+ * Copyright 2008 DENX Software Engineering, Stefan Roese <sr@denx.de>
+ *
+ * Extract the resources (MEM & IRQ) from the dts file and put them
+ * into the platform-device struct for usage in the platform-device
+ * USB-OTG driver.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL))
+#define U64_TO_U32_HIGH(val) ((u32)((val) >> 32))
+
+
+/*
+ * Resource template will be filled dynamically with the values
+ * extracted from the dts file
+ */
+static struct resource usb_otg_resources[] = {
+ [0] = {
+ /* 405EX USB-OTG registers */
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* 405EX OTG IRQ */
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* High-Power workaround IRQ */
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* 405EX DMA IRQ */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 dma_mask = 0xffffffffULL;
+
+static struct platform_device usb_otg_device = {
+ .name = "dwc_otg",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(usb_otg_resources),
+ .resource = usb_otg_resources,
+ .dev = {
+ .dma_mask = &dma_mask,
+ .coherent_dma_mask = 0xffffffffULL,
+ }
+};
+
+static struct platform_device *ppc405ex_devs[] __initdata = {
+ &usb_otg_device,
+};
+
+static int __devinit ppc405ex_usb_otg_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct device_node *np = ofdev->node;
+ struct resource res;
+
+ /*
+ * Extract register address range from device tree and put it into
+ * the platform device structure
+ */
+ if (of_address_to_resource(np, 0, &res)) {
+ printk(KERN_ERR "%s: Can't get USB-OTG register address\n", __func__);
+ return -ENOMEM;
+ }
+ usb_otg_resources[0].start = res.start;
+ usb_otg_resources[0].end = res.end;
+
+ /*
+ * Extract IRQ number(s) from device tree and put them into
+ * the platform device structure
+ */
+ usb_otg_resources[1].start = usb_otg_resources[1].end =
+ irq_of_parse_and_map(np, 0);
+ usb_otg_resources[2].start = usb_otg_resources[2].end =
+ irq_of_parse_and_map(np, 1);
+ usb_otg_resources[3].start = usb_otg_resources[3].end =
+ irq_of_parse_and_map(np, 2);
+ return platform_add_devices(ppc405ex_devs, ARRAY_SIZE(ppc405ex_devs));
+}
+
+static int __devexit ppc405ex_usb_otg_remove(struct of_device *ofdev)
+{
+ /* Nothing to do here */
+ return 0;
+}
+
+static const struct of_device_id ppc405ex_usb_otg_match[] = {
+ { .compatible = "amcc,usb-otg-405ex", },
+ {}
+};
+
+static struct of_platform_driver ppc405ex_usb_otg_driver = {
+ .name = "ppc405ex-usb-otg",
+ .match_table = ppc405ex_usb_otg_match,
+ .probe = ppc405ex_usb_otg_probe,
+ .remove = ppc405ex_usb_otg_remove,
+};
+
+static int __init ppc405ex_usb_otg_init(void)
+{
+ return of_register_platform_driver(&ppc405ex_usb_otg_driver);
+}
+device_initcall(ppc405ex_usb_otg_init);
diff --git a/drivers/ata/sata_dwc_pmp.c b/drivers/ata/sata_dwc_pmp.c
index df3b490f86e..54b69d67e35 100755
--- a/drivers/ata/sata_dwc_pmp.c
+++ b/drivers/ata/sata_dwc_pmp.c
@@ -1110,7 +1110,7 @@ static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc,
#if defined(CONFIG_APM82181)
if (dma_ch == 0) {
/* Buffer mode enabled, FIFO_MODE=0 */
- out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x000000d);
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000009);
/* Channel 0 bit[7:5] */
out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020);
} else if (dma_ch == 1) {
diff --git a/drivers/leds/led-class-3g.c b/drivers/leds/led-class-3g.c
index c0c1c11cbdb..7b8e0fa45ed 100644
--- a/drivers/leds/led-class-3g.c
+++ b/drivers/leds/led-class-3g.c
@@ -20,59 +20,296 @@
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/leds.h>
+#include <linux/errno.h>
#include "leds.h"
+#define MAX_USERS 32
+#define N_COLORS 4
+#define N_EVENTS 3
+#define USR_LEN 81
+#define EVENT_LEN 81
+#define INDEX_LEN 8
+
+
+/* LED users */
+#define EV_NAS_SYSTEM 0 /* Overall system: NAS ready, booting, shutdown... */
+#define EV_DISK_SMART 1 /* Disk SMART including temp., error lba, ...*/
+#define EV_DISK_IO 2 /* Disk read/write error */
+#define EV_RAID_CFG 3 /* RAID setup failure: assembling, formatting, rebuild ...*/
+#define EV_FW_UPDATE 4 /* NAS firmware update */
+#define EV_NETWORK 5 /* Network connectivity error */
+#define EV_VM 6 /* Volume manager */
+
+char Led_user_arr[MAX_USERS][USR_LEN] = { "EV_NAS_SYSTEM", \
+ "EV_DISK_SMART", \
+ "EV_DISK_IO" , \
+ "EV_RAID_CFG" , \
+ "EV_FW_UPDATE" , \
+ "EV_NETWORK" , \
+ "EV_VM", \
+ };
+/* LED event types */
+#define LED_STAT_OK 0 /* Happy user, normal operation */
+#define LED_STAT_ERR 1 /* User error, needs led indication */
+#define LED_STAT_IN_PROG 2 /* User doing something important, needs led indication */
+
+char *Led_ev_arr[] = { "LED_STAT_OK", "LED_STAT_ERR", "LED_STAT_IN_PROG" };
+
+char Color_map[MAX_USERS][N_EVENTS] = { {'g','r','w'}, /* EV_NAS_SYSTEM */ \
+ {'g','y','w'}, /* EV_DISK_SMART */ \
+ {'g','r','w'}, /* EV_DISK_IO */ \
+ {'g','r','w'}, /* EV_RAID_CFG */ \
+ {'g','r','w'}, /* EV_FW_UPDATE */ \
+ {'g','y','w'}, /* EV_NETWORK */ \
+ {'g','r','w'}, /* EV_VM */ \
+ };
+
+char Blink_map[MAX_USERS][N_EVENTS] = { {'n','n','n'}, /* EV_NAS_SYSTEM */ \
+ {'n','y','n'}, /* EV_DISK_SMART */ \
+ {'n','n','n'}, /* EV_DISK_IO */ \
+ {'n','n','n'}, /* EV_RAID_CFG */ \
+ {'n','n','n'}, /* EV_FW_UPDATE */ \
+ {'n','y','n'}, /* EV_NETWORK */ \
+ {'n','n','n'}, /* EV_VM */ \
+ };
+
+u32 Led_error_bits = 0;
+int N_USERS = 7; /* default number of users */
+
static struct class *leds_class;
static void led_update_color(struct led_classdev *led_cdev)
{
- if (led_cdev->color_get)
- led_cdev->color = led_cdev->color_get(led_cdev);
+ if (led_cdev->color_get)
+ led_cdev->color = led_cdev->color_get(led_cdev);
}
static ssize_t led_color_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+ struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- unsigned long state = 9;
-
- switch (buf[0]) {
- case 'r': /* red */
- state = 1;
- break;
- case 'g': /* green */
- state = 2;
- break;
- case 'b': /* blue */
- state = 3;
- break;
- case 'y': /* yellow */
- state = 4;
- break;
- case 'w': /* white */
- state = 5;
- break;
- case 'o': /* off */
- state = 0;
- break;
- default:
- break;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned long state = 9;
+ char user[USR_LEN], event[EVENT_LEN], index_str[INDEX_LEN], color;
+ int i = 0, j = 0, found = 0, tmp = 0, edit_policy = 0;
+ int reg_user = -1, reg_event = -1, reg_color = -1;
+ const char * cptr = NULL;
+ long int index = -1;
+ char blink;
+ int reg_blink = 'n';
+
+ cptr = &buf[0];
+
+ /* check for 'register' event */
+ // NB: Format of register event is
+ // register:event,status,color
+ if( cptr[8] == ':' ) {
+ if( !memcmp("register", cptr, 8) ) {
+ edit_policy = 1;
+ cptr = &buf[9];
+ }
+ }
+
+ /* parse user name */
+ for( i = 0; i < (USR_LEN -1) && cptr[i]; i++ ) {
+ if( cptr[i] == ',' ) {
+ break;
+ }
+ user[i] = cptr[i];
+ }
+
+ /* null terminate user buf */
+ user[i] = '\0';
+ i++; /* skips the ',' delimiter */
+
+
+ for( j = 0; (j < EVENT_LEN -1) && cptr[i] ; j++,i++ ) {
+ if( (cptr[i] == ',') || (cptr[i] == '\0') || (cptr[i] == '\n') ) {
+ if( cptr[i] == ',' ) {
+ cptr = &cptr[i+1];
+ }
+ break;
+ }
+ event[j] = cptr[i];
+ }
+ /* null terminate event buf */
+ event[j] = '\0';
+
+ /* if editing policy, parse the color */
+ if( edit_policy ) {
+ if( cptr != NULL ) {
+ reg_color = cptr[0]; /* r,g,b,y,w */
+ if( reg_color != 'r' && reg_color != 'g' &&
+ reg_color != 'b' && reg_color != 'y' && reg_color != 'w' ) {
+ reg_color = -1; /* invalid color */
+ }
+
+ /** TBD: Get the value of reg_blink from cptr */
+ }
+ }
+ else {
+ /* scan index for some users */
+ if( !strcmp(user, Led_user_arr[EV_DISK_SMART]) ||
+ !strcmp(user, Led_user_arr[EV_DISK_IO]) ) {
+ if( cptr != NULL ) {
+ for( i = 0; (i < INDEX_LEN -1) && cptr[i] ; i++ ) {
+ if( (cptr[i] == ',') || (cptr[i] == '\0') || (cptr[i] == '\n') ) {
+ break;
+ }
+ index_str[i] = cptr[i];
+ }
+ }
+ }
+
+ /* null terminate index_str */
+ index_str[i] = '\0';
+ if( i ) {
+ tmp = strict_strtol(index_str, 10, &index);
+ if( !tmp && (index >= 0) ) {
+ /*
+ * TODO: insert code to fulfill req's. Currently not required.
+ */
+ /*printk(KERN_INFO "\nindex %ld\n", index);*/
+ }
+ }
+ } /* if( !edit_policy ) */
+
+ /* Validate user and event */
+ found = 0;
+ for( i = 0; i < N_USERS; i++ ) {
+ if( !strcmp( Led_user_arr[i], user ) ) {
+ found = 1;
+ break;
}
+ }
- led_set_color(led_cdev, state);
+ if( found || edit_policy) {
+ reg_user = i;
+ /* new user registration */
+ if( ! found ) {
+ if( N_USERS == MAX_USERS ) {
+ /* only support up to 32 users */
+ return (ssize_t)size;
+ }
+ reg_user = N_USERS++;
+
+ strcpy(Led_user_arr[reg_user], user);
+ }
+ found = 0;
+ for( j = 0; j < N_EVENTS; j++ ) {
+ if( ! strcmp(Led_ev_arr[j], event) ) {
+ if( j == LED_STAT_ERR ) {
+ Led_error_bits |= (1 << i); /* register error for this user */
+ }
+ else if( j == LED_STAT_OK ) {
+ Led_error_bits &= ~(1 << i); /* clear error for this user */
+ }
+ found = 1;
+ reg_event = j;
+ break;
+ }
+ }
+ }
+
+ /* if this is a register event, do just that */
+ if( edit_policy ) {
+ /* valid event above and color */
+ if( (reg_event != -1) && (reg_color != -1) ) {
+ Color_map[reg_user][reg_event] = reg_color;
+
+ /** TBD: Add support for registering blink with register: interface*/
+ reg_blink = 'n';
+ Blink_map[reg_user][reg_event] = reg_blink;
+ }
+ /*printk( KERN_INFO "reg_user = %d, reg_event= %d, reg_color = %c\n", reg_user, reg_event, reg_color, reg_blink);*/
+ return (ssize_t)size;
+ }
+
+ /* Be nice ! support older led mechanism */
+ color = buf[0];
+ blink = 'x';
+
+ /* If valid user and event, retrieve color & blink map */
+ if( found ) {
+ /* if a canceling event and other error(s) existing, don't do anything */
+ if( (j == LED_STAT_OK) && (Led_error_bits != 0) ) {
+ }
+ else {
+ color = Color_map[i][j];
+ blink = Blink_map[i][j];
+ }
+ /*printk(KERN_INFO "\nUser= %s, event= %s, color %c, %08x\n", user, event, color, blink, Led_error_bits);*/
+ }
+
+ switch (color) {
+ case 'r': /* red */
+ state = 1;
+ break;
+ case 'g': /* green */
+ state = 2;
+ break;
+ case 'b': /* blue */
+ state = 3;
+ break;
+ case 'y': /* yellow */
+ state = 4;
+ break;
+ case 'w': /* white */
+ state = 5;
+ break;
+ case 'o': /* off */
+ state = 0;
+ break;
+ default:
+ state = -1;
+ break;
+ }
+
+ /** do nothing if no color change is required */
+ if( state == -1 ) {
+ return (ssize_t)size;
+ }
+
+ // printk(KERN_DEBUG "Calling led_set_color with value %c, blink is %c\n", color, blink);
+ led_set_color( led_cdev, state );
+
+ /** blink the led */
+ {
+ int val = -1;
+
+ printk(KERN_DEBUG "Calling led_set_blink with value %c\n", blink);
+
+ switch( blink ) {
+ case 'y': /** yes */
+ val = 1;
+ break;
+ case 'n': /** no */
+ val = 0;
+ break;
+ case 'f': /** forced */
+ val = 2;
+ break;
+ default:
+ break;
+ }
+
+ if( val >= 0 )
+ {
+ led_set_blink( led_cdev, val );
+ }
+ }
- return (ssize_t)size;
+ return (ssize_t)size;
}
static ssize_t led_color_show(struct device *dev,
- struct device_attribute *attr, char *buf) {
+ struct device_attribute *attr, char *buf) {
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
char * readbuf[] = {"off", "red", "green", "blue", "yellow", "white"} ;
- /* no lock needed for this */
- led_update_color(led_cdev);
+ /* no lock needed for this */
+ led_update_color(led_cdev);
- return sprintf(buf, "%s\n", readbuf[led_cdev->color]);
+ return sprintf(buf, "%s\n", readbuf[led_cdev->color]);
}
static ssize_t led_blink_show(struct device *dev, struct device_attribute *attr,
@@ -86,11 +323,11 @@ static ssize_t led_blink_show(struct device *dev, struct device_attribute *attr,
else if (led_cdev->blink == 1 ){
blinkStr = "yes";
}
- return sprintf(buf, "%s\n", blinkStr);
+ return sprintf(buf, "%s\n", blinkStr);
}
static ssize_t led_blink_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size) {
+ struct device_attribute *attr, const char *buf, size_t size) {
int val = 0;
struct led_classdev * led_cdev = dev_get_drvdata(dev);
@@ -109,11 +346,11 @@ static ssize_t led_blink_store(struct device *dev,
}
static ssize_t led_max_brightness_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", led_cdev->max_brightness);
+ return sprintf(buf, "%u\n", led_cdev->max_brightness);
}
/*static DEVICE_ATTR(brightness, 0644, led_brightness_show, led_brightness_store);*/
@@ -130,8 +367,8 @@ static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
*/
void led_classdev_suspend(struct led_classdev *led_cdev)
{
- led_cdev->flags |= LED_SUSPENDED;
- led_cdev->color_set(led_cdev, 0);
+ led_cdev->flags |= LED_SUSPENDED;
+ led_cdev->color_set(led_cdev, 0);
}
EXPORT_SYMBOL_GPL(led_classdev_suspend);
@@ -141,29 +378,29 @@ EXPORT_SYMBOL_GPL(led_classdev_suspend);
*/
void led_classdev_resume(struct led_classdev *led_cdev)
{
- led_cdev->color_set(led_cdev, led_cdev->color);
- led_cdev->flags &= ~LED_SUSPENDED;
+ led_cdev->color_set(led_cdev, led_cdev->color);
+ led_cdev->flags &= ~LED_SUSPENDED;
}
EXPORT_SYMBOL_GPL(led_classdev_resume);
static int led_suspend(struct device *dev, pm_message_t state)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
- if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
- led_classdev_suspend(led_cdev);
+ if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
+ led_classdev_suspend(led_cdev);
- return 0;
+ return 0;
}
static int led_resume(struct device *dev)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
- if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
- led_classdev_resume(led_cdev);
+ if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
+ led_classdev_resume(led_cdev);
- return 0;
+ return 0;
}
/**
@@ -173,62 +410,62 @@ static int led_resume(struct device *dev)
*/
int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
{
- int rc;
+ int rc;
- led_cdev->dev = device_create(leds_class, parent, 0, led_cdev,
- "%s", led_cdev->name);
- if (IS_ERR(led_cdev->dev))
- return PTR_ERR(led_cdev->dev);
+ led_cdev->dev = device_create(leds_class, parent, 0, led_cdev,
+ "%s", led_cdev->name);
+ if (IS_ERR(led_cdev->dev))
+ return PTR_ERR(led_cdev->dev);
- /* register the attributes */
- rc = device_create_file(led_cdev->dev, &dev_attr_color);
- if (rc)
- goto err_out;
+ /* register the attributes */
+ rc = device_create_file(led_cdev->dev, &dev_attr_color);
+ if (rc)
+ goto err_out;
- rc = device_create_file(led_cdev->dev, &dev_attr_blink);
- if (rc)
- goto err_out;
+ rc = device_create_file(led_cdev->dev, &dev_attr_blink);
+ if (rc)
+ goto err_out;
#ifdef CONFIG_LEDS_TRIGGERS
- init_rwsem(&led_cdev->trigger_lock);
+ init_rwsem(&led_cdev->trigger_lock);
#endif
- /* add to the list of leds */
- down_write(&leds_list_lock);
- list_add_tail(&led_cdev->node, &leds_list);
- up_write(&leds_list_lock);
+ /* add to the list of leds */
+ down_write(&leds_list_lock);
+ list_add_tail(&led_cdev->node, &leds_list);
+ up_write(&leds_list_lock);
- if (!led_cdev->max_brightness)
- led_cdev->max_brightness = LED_FULL;
+ if (!led_cdev->max_brightness)
+ led_cdev->max_brightness = LED_FULL;
- rc = device_create_file(led_cdev->dev, &dev_attr_max_brightness);
- if (rc)
- goto err_out_attr_max;
+ rc = device_create_file(led_cdev->dev, &dev_attr_max_brightness);
+ if (rc)
+ goto err_out_attr_max;
- led_update_color(led_cdev);
+ led_update_color(led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
- rc = device_create_file(led_cdev->dev, &dev_attr_trigger);
- if (rc)
- goto err_out_led_list;
+ rc = device_create_file(led_cdev->dev, &dev_attr_trigger);
+ if (rc)
+ goto err_out_led_list;
- led_trigger_set_default(led_cdev);
+ led_trigger_set_default(led_cdev);
#endif
- printk(KERN_INFO "Registered led device: %s\n",
- led_cdev->name);
+ printk(KERN_INFO "Registered led device: %s\n",
+ led_cdev->name);
- return 0;
+ return 0;
#ifdef CONFIG_LEDS_TRIGGERS
err_out_led_list:
- device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
+ device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
#endif
err_out_attr_max:
- device_remove_file(led_cdev->dev, &dev_attr_color);
- list_del(&led_cdev->node);
+ device_remove_file(led_cdev->dev, &dev_attr_color);
+ list_del(&led_cdev->node);
err_out:
- device_unregister(led_cdev->dev);
- return rc;
+ device_unregister(led_cdev->dev);
+ return rc;
}
EXPORT_SYMBOL_GPL(led_classdev_register);
@@ -240,37 +477,37 @@ EXPORT_SYMBOL_GPL(led_classdev_register);
*/
void led_classdev_unregister(struct led_classdev *led_cdev)
{
- device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
- device_remove_file(led_cdev->dev, &dev_attr_color);
+ device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
+ device_remove_file(led_cdev->dev, &dev_attr_color);
#ifdef CONFIG_LEDS_TRIGGERS
- device_remove_file(led_cdev->dev, &dev_attr_trigger);
- down_write(&led_cdev->trigger_lock);
- if (led_cdev->trigger)
- led_trigger_set(led_cdev, NULL);
- up_write(&led_cdev->trigger_lock);
+ device_remove_file(led_cdev->dev, &dev_attr_trigger);
+ down_write(&led_cdev->trigger_lock);
+ if (led_cdev->trigger)
+ led_trigger_set(led_cdev, NULL);
+ up_write(&led_cdev->trigger_lock);
#endif
- device_unregister(led_cdev->dev);
+ device_unregister(led_cdev->dev);
- down_write(&leds_list_lock);
- list_del(&led_cdev->node);
- up_write(&leds_list_lock);
+ down_write(&leds_list_lock);
+ list_del(&led_cdev->node);
+ up_write(&leds_list_lock);
}
EXPORT_SYMBOL_GPL(led_classdev_unregister);
static int __init leds_init(void)
{
- leds_class = class_create(THIS_MODULE, "leds");
- if (IS_ERR(leds_class))
- return PTR_ERR(leds_class);
- leds_class->suspend = led_suspend;
- leds_class->resume = led_resume;
- return 0;
+ leds_class = class_create(THIS_MODULE, "leds");
+ if (IS_ERR(leds_class))
+ return PTR_ERR(leds_class);
+ leds_class->suspend = led_suspend;
+ leds_class->resume = led_resume;
+ return 0;
}
static void __exit leds_exit(void)
{
- class_destroy(leds_class);
+ class_destroy(leds_class);
}
subsys_initcall(leds_init);
diff --git a/drivers/leds/leds-apollo3g.c b/drivers/leds/leds-apollo3g.c
index 6e796dc7c24..5fc791383c3 100644
--- a/drivers/leds/leds-apollo3g.c
+++ b/drivers/leds/leds-apollo3g.c
@@ -157,24 +157,25 @@ static int a3g_led_blink(struct led_classdev *led_cdev, int value) {
/*
* if forced blink, don't set blink_flag
*/
- if( blink_flag == 2 ) {
- return 0;
- }
+ if( blink_flag == 2 ) {
+ return 0;
+ }
/*spin_lock_irqsave(&led_lock, flags);*/
/* user wants to blink led */
if( value == 1 ) {
- wake_up(&ts_wait);
blink_flag = 1;
-
+ wake_up(&ts_wait);
}
else if( value == 0) {
blink_flag = 0;
}
else if( value == 2 ) {
- wake_up(&ts_wait);
blink_flag = 2;
+ wake_up(&ts_wait);
}
+ // printk(KERN_DEBUG "%s: Got blink signal - input blink value %d, blink_flag %d\n", __func__, value, blink_flag);
+
/* spin_unlock_irqrestore(&led_lock, flags);*/
return 0;
@@ -193,12 +194,12 @@ void signal_hdd_led(int flag, int color) {
if( blink_flag == 2 ) {
return;
}
-
- if( flag && /* blink or not */
+
+ if( flag && /* blink == yes */
(led_state.cur_color == _3G_LED_GREEN)
#if 0
- (led_state.cur_color != _3G_LED_WHITE) && /* don't touch fw update led */
- (led_state.cur_color != _3G_LED_RED) && /* don't touch system error led */
+ (led_state.cur_color != _3G_LED_WHITE) && /* don't touch fw update led */
+ (led_state.cur_color != _3G_LED_RED) && /* don't touch system error led */
!((led_state.cur_color == _3G_LED_BLUE) && (led_state.cur_action == _BLINK_YES)) && /* leave identity alone */
(color != _3G_LED_RED)
#endif
@@ -209,23 +210,27 @@ void signal_hdd_led(int flag, int color) {
blink_flag = 1;
wake_up(&ts_wait);
}
- else {
- blink_flag = 0;
+ else if( ! flag && /* blink == no */
+ ( led_state.cur_color == _3G_LED_GREEN ) )
+ {
+ blink_flag = 0;
}
+
+ //printk(KERN_DEBUG "%s: Got HDD signal - color %d, blink %d, blink_flag %d\n", __func__, color, flag, blink_flag);
}
static struct led_classdev a3g_led_dev = {
.name = "a3g_led",
- .color_set = a3g_led_set,
- .color_get = a3g_led_get,
- .blink_set_3g = a3g_led_blink,
+ .color_set = a3g_led_set,
+ .color_get = a3g_led_get,
+ .blink_set_3g = a3g_led_blink,
};
/****************************************************/
static int __init a3g_led_probe(struct platform_device *pdev ) {
- /* Not used */
- return 0;
+ /* Not used */
+ return 0;
}
/****************************************************/
@@ -239,12 +244,12 @@ static int __devexit a3g_led_remove(struct platform_device *pdev){
return 0;
}
static struct platform_driver a3g_led_driver = {
- .probe = a3g_led_probe,
- .remove = __devexit_p(a3g_led_remove),
- .driver = {
- .name = "a3g-leds",
+ .probe = a3g_led_probe,
+ .remove = __devexit_p(a3g_led_remove),
+ .driver = {
+ .name = "a3g-leds",
.owner = THIS_MODULE,
- },
+ },
};
#if 0
@@ -287,7 +292,7 @@ static int a3g_led_blink_thread( void * data ) {
led_state.cur_action = _BLINK_NO;
/* always set current color before blinking */
- a3g_led_set( NULL, led_state.cur_color);
+ a3g_led_set( NULL, led_state.cur_color);
wait_event_freezable_timeout(ts_wait, blink_flag || kthread_should_stop(), MAX_SCHEDULE_TIMEOUT);
if( led_port ) {
readval = readb(led_port);
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index eabad755e4a..babb42a5f23 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/tcp.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
@@ -41,6 +42,7 @@
#include <linux/of.h>
#include <linux/sysctl.h>
+#include <net/tcp.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -1261,6 +1263,27 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu)
}
if (!ret) {
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
+ struct tah_instance *tdev;
+ int i, adj_val = 0;
+ u32 ss_defs[] = TAH_SS_DEFAULT;
+
+ tdev = dev_get_drvdata(&dev->tah_dev->dev);
+ if (new_mtu > ss_defs[0]) {
+				/* use the new MTU as the SSR0 segment size */
+ tah_set_ssr(dev->tah_dev, 0, new_mtu);
+ /* update the adjustment var */
+ adj_val = 1;
+ }
+ /* don't allow values to exceed new MTU */
+ for (i = adj_val; i < TAH_NO_SSR;i++) {
+ if (ss_defs[i-adj_val] > new_mtu)
+ tah_set_ssr(dev->tah_dev, i,
+ new_mtu);
+ else tah_set_ssr(dev->tah_dev, i,
+ ss_defs[i-adj_val]);
+ }
+ }
ndev->mtu = new_mtu;
dev->rx_skb_size = emac_rx_skb_size(new_mtu);
dev->rx_sync_size = emac_rx_sync_size(new_mtu);
@@ -1597,12 +1620,64 @@ static int emac_close(struct net_device *ndev)
static inline u16 emac_tx_csum(struct emac_instance *dev,
struct sk_buff *skb)
{
+ u32 seg_size = 0;
+ int i = 0;
+ int ssr_idx = -1;
+ u32 curr_seg;
+ __be16 protocol;
+ int is_tcp = 0;
+ struct tah_instance *tah_dev;
+
if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
(skb->ip_summed == CHECKSUM_PARTIAL)) {
++dev->stats.tx_packets_csum;
- if (skb_is_gso(skb))
- return EMAC_TX_CTRL_TAH_SSR0;
- else
+
+ /* Only support TSO for TCP */
+ protocol = skb->protocol;
+ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ is_tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ is_tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);
+ break;
+ default:
+ is_tcp = 0;
+ break;
+ }
+
+ if (skb_is_gso(skb) && is_tcp) {
+			/* Compute the full segment size: GSO payload plus TCP and IP header lengths */
+ seg_size = skb_is_gso(skb) + tcp_hdrlen(skb)
+ + skb_network_header_len(skb);
+			/* Pick the largest configured SSR segment size that fits this skb */
+ tah_dev = dev_get_drvdata(&dev->tah_dev->dev);
+ ssr_idx = -1;
+ for (i = 0; i < TAH_NO_SSR; i++) {
+ curr_seg = tah_dev->ss_array[tah_dev->ss_order[i]];
+ if ( (curr_seg > dev->ndev->mtu) ||
+ (curr_seg > seg_size) )
+ continue;
+ if (curr_seg <= seg_size) {
+ ssr_idx = tah_dev->ss_order[i];
+ break;
+ }
+ }
+
+ if (ssr_idx == -1) {
+ printk(KERN_WARNING "No suitable TAH_SSRx "
+ "for segmentation size %d\n", seg_size);
+ /* Avoid using TSO feature in this case */
+ return EMAC_TX_CTRL_TAH_CSUM;
+ }
+
+#if 0
+ printk("Select ssr index %d segment size %d SSR value 0x%04x\n",
+ ssr_idx, tah_dev->ss_array[ssr_idx],
+ EMAC_TX_CTRL_TAH_SSR(ssr_idx));
+#endif
+ return EMAC_TX_CTRL_TAH_SSR(ssr_idx);
+ } else
return EMAC_TX_CTRL_TAH_CSUM;
}
return 0;
@@ -2812,6 +2887,164 @@ static ssize_t store_emi_fix_enable(struct device *dev,
#endif
+#if defined(CONFIG_IBM_NEW_EMAC_TAH)
+static ssize_t show_tah_ssr0(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ return sprintf(buf, "%d\n",
+ TAH_SSR_2_SS(tah_get_ssr(dev_ins->tah_dev, 0)) << 1);
+
+ return 0;
+}
+
+static ssize_t store_tah_ssr0(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ tah_set_ssr(dev_ins->tah_dev, 0, tmp);
+
+ return count;
+}
+
+static ssize_t show_tah_ssr1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ return sprintf(buf, "%d\n",
+ TAH_SSR_2_SS(tah_get_ssr(dev_ins->tah_dev, 1)) << 1);
+
+ return 0;
+}
+
+static ssize_t store_tah_ssr1(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ tah_set_ssr(dev_ins->tah_dev, 1, tmp);
+
+ return count;
+}
+
+static ssize_t show_tah_ssr2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ return sprintf(buf, "%d\n",
+ TAH_SSR_2_SS(tah_get_ssr(dev_ins->tah_dev, 2)) << 1);
+
+ return 0;
+}
+
+static ssize_t store_tah_ssr2(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ tah_set_ssr(dev_ins->tah_dev, 2, tmp);
+
+ return count;
+}
+
+static ssize_t show_tah_ssr3(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ return sprintf(buf, "%d\n",
+ TAH_SSR_2_SS(tah_get_ssr(dev_ins->tah_dev, 3)) << 1);
+
+ return 0;
+}
+
+static ssize_t store_tah_ssr3(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ tah_set_ssr(dev_ins->tah_dev, 3, tmp);
+
+ return count;
+}
+
+static ssize_t show_tah_ssr4(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ return sprintf(buf, "%d\n",
+ TAH_SSR_2_SS(tah_get_ssr(dev_ins->tah_dev, 4)) << 1);
+
+ return 0;
+}
+
+static ssize_t store_tah_ssr4(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ tah_set_ssr(dev_ins->tah_dev, 4, tmp);
+
+ return count;
+}
+
+static ssize_t show_tah_ssr5(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ return sprintf(buf, "%d\n",
+ TAH_SSR_2_SS(tah_get_ssr(dev_ins->tah_dev, 5)) << 1);
+
+ return 0;
+}
+
+static ssize_t store_tah_ssr5(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ if (emac_has_feature(dev_ins, EMAC_FTR_HAS_TAH))
+ tah_set_ssr(dev_ins->tah_dev, 5, tmp);
+
+ return count;
+}
+#endif
+
#if defined(CONFIG_IBM_NEW_EMAC_INTR_COALESCE)
static DEVICE_ATTR(coalesce_param_tx_count,
S_IRUGO | S_IWUSR, show_tx_count, store_tx_count);
@@ -2823,6 +3056,21 @@ static DEVICE_ATTR(coalesce_param_rx_time,
S_IRUGO | S_IWUSR, show_rx_time, store_rx_time);
#endif
+#if defined(CONFIG_IBM_NEW_EMAC_TAH)
+static DEVICE_ATTR(tah_ssr0,
+ S_IRUGO | S_IWUSR, show_tah_ssr0, store_tah_ssr0);
+static DEVICE_ATTR(tah_ssr1,
+ S_IRUGO | S_IWUSR, show_tah_ssr1, store_tah_ssr1);
+static DEVICE_ATTR(tah_ssr2,
+ S_IRUGO | S_IWUSR, show_tah_ssr2, store_tah_ssr2);
+static DEVICE_ATTR(tah_ssr3,
+ S_IRUGO | S_IWUSR, show_tah_ssr3, store_tah_ssr3);
+static DEVICE_ATTR(tah_ssr4,
+ S_IRUGO | S_IWUSR, show_tah_ssr4, store_tah_ssr4);
+static DEVICE_ATTR(tah_ssr5,
+ S_IRUGO | S_IWUSR, show_tah_ssr5, store_tah_ssr5);
+#endif
+
#if defined(CONFIG_APM82181)
#if defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
static DEVICE_ATTR(emi_fix_enable, S_IRUGO | S_IWUSR,
@@ -2838,6 +3086,15 @@ static struct attribute *ibm_newemac_attr[] = {
&dev_attr_coalesce_param_rx_time.attr,
#endif
+#if defined(CONFIG_IBM_NEW_EMAC_TAH)
+ &dev_attr_tah_ssr0.attr,
+ &dev_attr_tah_ssr1.attr,
+ &dev_attr_tah_ssr2.attr,
+ &dev_attr_tah_ssr3.attr,
+ &dev_attr_tah_ssr4.attr,
+ &dev_attr_tah_ssr5.attr,
+#endif
+
#if defined(CONFIG_APM82181)
#if defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
&dev_attr_emi_fix_enable.attr,
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index bc54a228556..32389a03586 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -320,5 +320,6 @@ struct emac_regs {
#define EMAC_TX_CTRL_TAH_SSR3 0x0008
#define EMAC_TX_CTRL_TAH_SSR4 0x000a
#define EMAC_TX_CTRL_TAH_SSR5 0x000c
+#define EMAC_TX_CTRL_TAH_SSR(idx) (((idx) + 1) << 1)
#define EMAC_TX_CTRL_TAH_CSUM 0x000e
#endif /* __IBM_NEWEMAC_H */
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 8d31b4a2c91..4ed836a8d82 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -49,6 +49,7 @@ void tah_reset(struct of_device *ofdev)
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
struct tah_regs __iomem *p = dev->base;
int n;
+ u32 ss_arr[] = TAH_SS_DEFAULT;
/* Reset TAH */
out_be32(&p->mr, TAH_MR_SR);
@@ -63,6 +64,16 @@ void tah_reset(struct of_device *ofdev)
out_be32(&p->mr,
TAH_MR_CVR | TAH_MR_ST_256 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
TAH_MR_DIG);
+
+ /* Re-initialize SSRx values */
+ for (n=0; n < TAH_NO_SSR; n++) {
+ dev->ss_order[n] = n;
+ }
+
+ for (n=0; n < TAH_NO_SSR; n++) {
+ tah_set_ssr(ofdev, n, ss_arr[n]);
+ }
+
}
int tah_get_regs_len(struct of_device *ofdev)
@@ -86,6 +97,74 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf)
return regs + 1;
}
+void tah_set_ssr(struct of_device *ofdev, int index, int seg_size)
+{
+ struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct tah_regs __iomem *p = dev->base;
+ u32 ssr_tmp[TAH_NO_SSR];
+ int i = 0;
+ int j = 0;
+ u32 tmp_val;
+
+ if ((index < 0) || (index > 5)) return;
+ mutex_lock(&dev->lock);
+ /* TAH segment size reg defines the number of half words */
+ out_be32(&p->ssr0 + index, SS_2_TAH_SSR(seg_size >> 1));
+ dev->ss_array[index] = seg_size & 0x3ffe;
+
+ /*
+ * Sort the TAH_SSRx values and store the index in
+ * ss_order array in high-to-low order
+ */
+ for (i=0; i < TAH_NO_SSR; i++) {
+ ssr_tmp[i] = dev->ss_array[i];
+ dev->ss_order[i] = i;
+ }
+	/* Simple bubble sort */
+ for (i =0; i < (TAH_NO_SSR-1); i++)
+ for (j = i+1; j < TAH_NO_SSR; j++) {
+ if (ssr_tmp[i] < ssr_tmp[j]) {
+ /* Swap ssr_tmp[] values */
+ tmp_val = ssr_tmp[i];
+ ssr_tmp[i] = ssr_tmp[j];
+ ssr_tmp[j] = tmp_val;
+ /* Swap order array values */
+ tmp_val = dev->ss_order[i];
+ dev->ss_order[i] = dev->ss_order[j];
+ dev->ss_order[j] = tmp_val;
+ }
+ }
+#if 0
+ printk(KERN_DEBUG "%s: Setting TAH_SSR%d[SS] to %d\n",
+ ofdev->node->full_name, index,
+ TAH_SSR_2_SS(in_be32(&p->ssr0+index)));
+ printk("SSRx array: ");
+ for (i = 0; i < TAH_NO_SSR; i++)
+ printk("%d ", ssr_tmp[i]);
+ printk("\n");
+
+ printk("SSRx order: ");
+ for (i = 0; i < TAH_NO_SSR; i++)
+ printk("%d ", dev->ss_order[i]);
+ printk("\n");
+#endif
+ mutex_unlock(&dev->lock);
+}
+
+u32 tah_get_ssr(struct of_device *ofdev, int index)
+{
+ struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct tah_regs __iomem *p = dev->base;
+ u32 ret = 0;
+
+ if ((index < 0) || (index > 5)) return 0;
+ mutex_lock(&dev->lock);
+ ret = (in_be32(&p->ssr0 + index));
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
static int __devinit tah_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
diff --git a/drivers/net/ibm_newemac/tah.h b/drivers/net/ibm_newemac/tah.h
index a068b5658da..1246ea3f911 100644
--- a/drivers/net/ibm_newemac/tah.h
+++ b/drivers/net/ibm_newemac/tah.h
@@ -38,9 +38,26 @@ struct tah_regs {
/* TAH device */
+/*
+ * Default MTU values for common networks.
+ * Note that the first value may not be correct as
+ * we will use the device's current MTU for SSR0
+ */
+#define TAH_SS_DEFAULT { 1500, \
+ 1400, \
+ 1280, \
+ 1006, \
+ 576, \
+ 68 }
+#define TAH_NO_SSR 6
struct tah_instance {
struct tah_regs __iomem *base;
+ /* Current setting for TAHx_SSRx */
+ u32 ss_array[TAH_NO_SSR];
+	/* List of indexes of ordered TAH_x_SSRx values (from high to low) */
+ u32 ss_order[TAH_NO_SSR];
+
/* Only one EMAC whacks us at a time */
struct mutex lock;
@@ -69,6 +86,9 @@ struct tah_instance {
#define TAH_MR_TFS_10KB 0x00a00000
#define TAH_MR_DTFP 0x00100000
#define TAH_MR_DIG 0x00080000
+#define TAH_SSR_2_SS(val) (((val) >> 17) & 0x1fff)
+/* s is number of half words */
+#define SS_2_TAH_SSR(s) (((s) & 0x1fff) << 17)
#ifdef CONFIG_IBM_NEW_EMAC_TAH
@@ -79,6 +99,8 @@ extern void tah_detach(struct of_device *ofdev, int channel);
extern void tah_reset(struct of_device *ofdev);
extern int tah_get_regs_len(struct of_device *ofdev);
extern void *tah_dump_regs(struct of_device *ofdev, void *buf);
+extern void tah_set_ssr(struct of_device *ofdev, int index, int seg_size);
+extern u32 tah_get_ssr(struct of_device *ofdev, int index);
#else
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 392d8db3390..147c22e418a 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -476,8 +476,8 @@ static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
* sdev_rd_attr: create a function and attribute variable for a
* read/write field.
*/
-#define sdev_rw_attr(field, format_string) \
- sdev_show_function(field, format_string) \
+#define sdev_rw_attr(field, r_format_string, w_format_string) \
+ sdev_show_function(field, r_format_string) \
\
static ssize_t \
sdev_store_##field (struct device *dev, struct device_attribute *attr, \
@@ -485,7 +485,7 @@ sdev_store_##field (struct device *dev, struct device_attribute *attr, \
{ \
struct scsi_device *sdev; \
sdev = to_scsi_device(dev); \
- snscanf (buf, 20, format_string, &sdev->field); \
+ sscanf (buf, w_format_string, &sdev->field); \
return count; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
@@ -539,7 +539,7 @@ static int scsi_sdev_check_buf_bit(const char *buf)
sdev_rd_attr (device_blocked, "%d\n");
sdev_rd_attr (queue_depth, "%d\n");
sdev_rd_attr (type, "%d\n");
-sdev_rd_attr (scsi_level, "%d\n");
+sdev_rw_attr (scsi_level, "%d\n", "%hhd\n");
sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 980a8d27fa5..5f7ae254f0b 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1185,13 +1185,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
int i;
- dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
- skip_ep0 ? "non-ep0" : "all");
- for (i = skip_ep0; i < 16; ++i) {
- usb_disable_endpoint(dev, i, true);
- usb_disable_endpoint(dev, i + USB_DIR_IN, true);
- }
-
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
*/
@@ -1221,6 +1214,13 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
if (dev->state == USB_STATE_CONFIGURED)
usb_set_device_state(dev, USB_STATE_ADDRESS);
}
+
+ dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
+ skip_ep0 ? "non-ep0" : "all");
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, true);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+ }
}
/**
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index ab2d3e7c3f2..7d6510a0895 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -448,6 +448,8 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
return dev;
}
+
+EXPORT_SYMBOL_GPL(usb_alloc_dev);
/**
* usb_get_dev - increments the reference count of the usb device structure
* @dev: the device being referenced
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c b/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c
index 61a8879fc10..1ba37fe7eba 100644
--- a/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c
@@ -1838,7 +1838,7 @@ void ppc4xx_start_plb_dma(dwc_otg_core_if_t *_core_if, void *src, void *dst, uns
mtdcr(DCRN_DMACR0 + (dma_ch * 8), control);
-#ifdef OTG_PLB_DMA_DBG
+#ifdef CONFIG_OTG_PLB_DMA_DBG
ppc4xx_dump_dma(dma_ch);
#endif
ppc4xx_enable_dma(dma_ch);
@@ -1917,7 +1917,7 @@ void dwc_otg_hc_write_packet(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
while (mfdcr(DCRN_DMACR0 + (PLB_DMA_CH*8)) & DMA_CE_ENABLE) {
}
dma_sts = (uint32_t)ppc4xx_get_dma_status();
-#ifdef OTG_PLB_DMA_DBG
+#ifdef CONFIG_OTG_PLB_DMA_DBG
if (!(dma_sts & DMA_CS0)) {
printk("Status (Terminal Count not occured) 0x%08x\n", mfdcr(DCRN_DMASR));
}
@@ -1926,7 +1926,7 @@ void dwc_otg_hc_write_packet(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
printk("Status (Channel Error) 0x%08x\n", mfdcr(DCRN_DMASR));
}
ppc4xx_clr_dma_status(PLB_DMA_CH);
-#ifdef OTG_PLB_DMA_DBG
+#ifdef CONFIG_OTG_PLB_DMA_DBG
printk("%32s DMA Status =0x%08x\n", __FUNCTION__, mfdcr(DCRN_DMASR)); /* vj_dbg */
#endif
@@ -2687,7 +2687,7 @@ void dwc_otg_ep_write_packet(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep,
while (mfdcr(DCRN_DMACR0 + (DMA_CH0*8)) & DMA_CE_ENABLE) {
}
dma_sts = (uint32_t)ppc4xx_get_dma_status();
-#ifdef OTG_PLB_DMA_DBG
+#ifdef CONFIG_OTG_PLB_DMA_DBG
if (!(dma_sts & DMA_CS0)) {
printk("DMA Status (Terminal Count not occured) 0x%08x\n", mfdcr(DCRN_DMASR));
}
@@ -2696,7 +2696,7 @@ void dwc_otg_ep_write_packet(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep,
printk("DMA Status (Channel 0 Error) 0x%08x\n", mfdcr(DCRN_DMASR));
}
ppc4xx_clr_dma_status(PLB_DMA_CH);
-#ifdef OTG_PLB_DMA_DBG
+#ifdef CONFIG_OTG_PLB_DMA_DBG
printk("%32s DMA Status =0x%08x\n", __FUNCTION__, mfdcr(DCRN_DMASR)); /* vj_dbg */
#endif
#endif /* CONFIG_OTG_PLB_DMA_TASKLET */
@@ -2841,7 +2841,7 @@ void dwc_otg_read_packet(dwc_otg_core_if_t * _core_if,
while (mfdcr(DCRN_DMACR0 + (DMA_CH0*8)) & DMA_CE_ENABLE) {
}
dma_sts = (uint32_t)ppc4xx_get_dma_status();
-#ifdef OTG_PLB_DMA_DBG
+#ifdef CONFIG_OTG_PLB_DMA_DBG
if (!(dma_sts & DMA_CS0)) {
printk("DMA Status (Terminal Count not occured) 0x%08x\n", mfdcr(DCRN_DMASR));
}
@@ -2850,7 +2850,7 @@ void dwc_otg_read_packet(dwc_otg_core_if_t * _core_if,
printk("DMA Status (Channel 0 Error) 0x%08x\n", mfdcr(DCRN_DMASR));
}
ppc4xx_clr_dma_status(PLB_DMA_CH);
-#ifdef OTG_PLB_DMA_DBG
+#ifdef CONFIG_OTG_PLB_DMA_DBG
printk("%32s DMA Status =0x%08x\n", __FUNCTION__, mfdcr(DCRN_DMASR));
printk(" Rxed buffer \n");
for( i=0; i< _bytes; i++) {
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c.org b/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c.org
new file mode 100644
index 00000000000..61a8879fc10
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c.org
@@ -0,0 +1,3237 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil.c $
+ * $Revision: #24 $
+ * $Date: 2007/02/07 $
+ * $Change: 791271 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+
+/** @file
+ *
+ * The Core Interface Layer provides basic services for accessing and
+ * managing the DWC_otg hardware. These services are used by both the
+ * Host Controller Driver and the Peripheral Controller Driver.
+ *
+ * The CIL manages the memory map for the core so that the HCD and PCD
+ * don't have to do this separately. It also handles basic tasks like
+ * reading/writing the registers and data FIFOs in the controller.
+ * Some of the data access functions provide encapsulation of several
+ * operations required to perform a task, such as writing multiple
+ * registers to start a transfer. Finally, the CIL performs basic
+ * services that are not specific to either the host or device modes
+ * of operation. These services include management of the OTG Host
+ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
+ * Diagnostic API is also provided to allow testing of the controller
+ * hardware.
+ *
+ * The Core Interface Layer has the following requirements:
+ * - Provides basic controller operations.
+ * - Minimal use of OS services.
+ * - The OS services used will be abstracted by using inline functions
+ * or macros.
+ *
+ */
+#include <asm/unaligned.h>
+#ifdef CONFIG_DWC_DEBUG
+#include <linux/jiffies.h>
+#endif /* */
+
+#include <asm/dcr.h>
+
+#include "linux/dwc_otg_plat.h"
+#include "dwc_otg_regs.h"
+#include "dwc_otg_cil.h"
+
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+atomic_t release_later = ATOMIC_INIT(0);
+#endif
+/**
+ * This function is called to initialize the DWC_otg CSR data
+ * structures. The register addresses in the device and host
+ * structures are initialized from the base address supplied by the
+ * caller. The calling function must make the OS calls to get the
+ * base address of the DWC_otg controller registers. The core_params
+ * argument holds the parameters that specify how the core should be
+ * configured.
+ *
+ * @param[in] _reg_base_addr Base address of DWC_otg core registers
+ * @param[in] _core_params Pointer to the core configuration parameters
+ *
+ */
+dwc_otg_core_if_t * dwc_otg_cil_init(const uint32_t * _reg_base_addr,
+ dwc_otg_core_params_t *_core_params)
+{
+ dwc_otg_core_if_t * core_if = 0;
+ dwc_otg_dev_if_t * dev_if = 0;
+ dwc_otg_host_if_t * host_if = 0;
+ uint8_t * reg_base = (uint8_t *) _reg_base_addr;
+ int i = 0;
+ DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, _reg_base_addr,
+ _core_params);
+ core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL);
+ if (core_if == 0) {
+ DWC_DEBUGPL(DBG_CIL,"Allocation of dwc_otg_core_if_t failed\n");
+ return 0;
+ }
+ memset(core_if, 0, sizeof(dwc_otg_core_if_t));
+ core_if->core_params = _core_params;
+ core_if->core_global_regs = (dwc_otg_core_global_regs_t *) reg_base;
+
+ /*
+ * Allocate the Device Mode structures.
+ */
+ dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL);
+ if (dev_if == 0) {
+ DWC_DEBUGPL(DBG_CIL,"Allocation of dwc_otg_dev_if_t failed\n");
+ kfree(core_if);
+ return 0;
+ }
+ dev_if->dev_global_regs = (dwc_otg_device_global_regs_t *)(reg_base +
+ DWC_DEV_GLOBAL_REG_OFFSET);
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *)
+ (reg_base + DWC_DEV_IN_EP_REG_OFFSET + (i * DWC_EP_REG_OFFSET));
+ dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *)
+ (reg_base + DWC_DEV_OUT_EP_REG_OFFSET + (i * DWC_EP_REG_OFFSET));
+ DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n", i,
+ &dev_if->in_ep_regs[i]->diepctl);
+ DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n", i,
+ &dev_if->out_ep_regs[i]->doepctl);
+ }
+ dev_if->speed = 0; // unknown
+ core_if->dev_if = dev_if;
+
+ /*
+ * Allocate the Host Mode structures.
+ */
+ host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL);
+ if (host_if == 0) {
+ DWC_DEBUGPL(DBG_CIL,"Allocation of dwc_otg_host_if_t failed\n");
+ kfree(dev_if);
+ kfree(core_if);
+ return 0;
+ }
+ host_if->host_global_regs = (dwc_otg_host_global_regs_t *)
+ (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
+ host_if->hprt0 = (uint32_t *) (reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET);
+
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ host_if->hc_regs[i] = (dwc_otg_hc_regs_t *)
+ (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + (i * DWC_OTG_CHAN_REGS_OFFSET));
+ DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n", i,&host_if->hc_regs[i]->hcchar);
+ }
+
+ host_if->num_host_channels = MAX_EPS_CHANNELS;
+ core_if->host_if = host_if;
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ core_if->data_fifo[i] =
+ (uint32_t *) (reg_base + DWC_OTG_DATA_FIFO_OFFSET +
+ (i * DWC_OTG_DATA_FIFO_SIZE));
+ DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n", i,
+ (unsigned)core_if->data_fifo[i]);
+ }
+ core_if->pcgcctl = (uint32_t *) (reg_base + DWC_OTG_PCGCCTL_OFFSET);
+
+ /*
+ * Store the contents of the hardware configuration registers here for
+ * easy access later.
+ */
+ core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1);
+ core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2);
+#ifdef CONFIG_DWC_SLAVE
+ core_if->hwcfg2.b.architecture = DWC_SLAVE_ONLY_ARCH;
+#endif
+ core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3);
+ core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4);
+ DWC_DEBUGPL(DBG_CILV, "hwcfg1=%08x\n", core_if->hwcfg1.d32);
+ DWC_DEBUGPL(DBG_CILV, "hwcfg2=%08x\n", core_if->hwcfg2.d32);
+ DWC_DEBUGPL(DBG_CILV, "hwcfg3=%08x\n", core_if->hwcfg3.d32);
+ DWC_DEBUGPL(DBG_CILV, "hwcfg4=%08x\n", core_if->hwcfg4.d32);
+ DWC_DEBUGPL(DBG_CILV, "op_mode=%0x\n", core_if->hwcfg2.b.op_mode);
+ DWC_DEBUGPL(DBG_CILV, "arch=%0x\n", core_if->hwcfg2.b.architecture);
+ DWC_DEBUGPL(DBG_CILV, "num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep + 1);
+ DWC_DEBUGPL(DBG_CILV, "num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan);
+ DWC_DEBUGPL(DBG_CILV, "nonperio_tx_q_depth=0x%0x\n",
+ core_if->hwcfg2.b.nonperio_tx_q_depth);
+ DWC_DEBUGPL(DBG_CILV, "host_perio_tx_q_depth=0x%0x\n",
+ core_if->hwcfg2.b.host_perio_tx_q_depth);
+ DWC_DEBUGPL(DBG_CILV, "dev_token_q_depth=0x%0x\n",
+ core_if->hwcfg2.b.dev_token_q_depth);
+ DWC_DEBUGPL(DBG_CILV, "Total FIFO SZ=%d\n",
+ core_if->hwcfg3.b.dfifo_depth);
+ DWC_DEBUGPL(DBG_CILV, "xfer_size_cntr_width=%0x\n",
+ core_if->hwcfg3.b.xfer_size_cntr_width);
+
+ /*
+ * Set the SRP sucess bit for FS-I2c
+ */
+ core_if->srp_success = 0;
+ core_if->srp_timer_started = 0;
+ return core_if;
+}
+
+
+/**
+ * This function frees the structures allocated by dwc_otg_cil_init().
+ *
+ * @param[in] _core_if The core interface pointer returned from
+ * dwc_otg_cil_init().
+ *
+ */
+void dwc_otg_cil_remove(dwc_otg_core_if_t * _core_if)
+{
+ /* Disable all interrupts */
+ dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, 1, 0);
+ dwc_write_reg32(&_core_if->core_global_regs->gintmsk, 0);
+ if (_core_if->dev_if) {
+ kfree(_core_if->dev_if);
+ }
+ if (_core_if->host_if) {
+ kfree(_core_if->host_if);
+ }
+ kfree(_core_if);
+}
+
+
+/**
+ * This function enables the controller's Global Interrupt in the AHB Config
+ * register.
+ *
+ * @param[in] _core_if Programming view of DWC_otg controller.
+ */
+extern void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t * _core_if)
+{
+ gahbcfg_data_t ahbcfg = {.d32 = 0};
+ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
+ dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
+}
+
+/**
+ * This function disables the controller's Global Interrupt in the AHB Config
+ * register.
+ *
+ * @param[in] _core_if Programming view of DWC_otg controller.
+ */
+extern void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t * _core_if)
+{
+ gahbcfg_data_t ahbcfg = {.d32 = 0};
+ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
+ dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
+}
+
+/**
+ * This function initializes the commmon interrupts, used in both
+ * device and host modes.
+ *
+ * @param[in] _core_if Programming view of the DWC_otg controller
+ *
+ */
+static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t * _core_if)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+
+ /* Clear any pending OTG Interrupts */
+ dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF);
+
+ /* Clear any pending interrupts */
+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+ /*
+ * Enable the interrupts in the GINTMSK.
+ */
+ intr_mask.b.modemismatch = 1;
+ intr_mask.b.otgintr = 1;
+ if (!_core_if->dma_enable) {
+ intr_mask.b.rxstsqlvl = 1;
+ }
+ intr_mask.b.conidstschng = 1;
+ intr_mask.b.wkupintr = 1;
+ intr_mask.b.disconnect = 1;
+ intr_mask.b.usbsuspend = 1;
+ intr_mask.b.sessreqintr = 1;
+ dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32);
+}
+
+
+/**
+ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY
+ * type.
+ */
+static void init_fslspclksel(dwc_otg_core_if_t * _core_if)
+{
+ uint32_t val;
+ hcfg_data_t hcfg;
+ if (((_core_if->hwcfg2.b.hs_phy_type == 2) &&
+ (_core_if->hwcfg2.b.fs_phy_type == 1) &&
+ (_core_if->core_params->ulpi_fs_ls)) ||
+ (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
+ /* Full speed PHY */
+ val = DWC_HCFG_48_MHZ;
+ } else {
+ /* High speed PHY running at full speed or high speed */
+ val = DWC_HCFG_30_60_MHZ;
+ }
+ DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val);
+ hcfg.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hcfg);
+ hcfg.b.fslspclksel = val;
+ dwc_write_reg32(&_core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+}
+
+
+/**
+ * Initializes the DevSpd field of the DCFG register depending on the PHY type
+ * and the enumeration speed of the device.
+ */
+static void init_devspd(dwc_otg_core_if_t * _core_if)
+{
+ uint32_t val;
+ dcfg_data_t dcfg;
+ if (((_core_if->hwcfg2.b.hs_phy_type == 2) &&
+ (_core_if->hwcfg2.b.fs_phy_type == 1) &&
+ (_core_if->core_params->ulpi_fs_ls)) ||
+ (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
+ /* Full speed PHY */
+ val = 0x3;
+ } else if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
+ /* High speed PHY running at full speed */
+ val = 0x1;
+ } else {
+ /* High speed PHY running at high speed */
+ val = 0x0;
+ }
+ DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
+ dcfg.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dcfg);
+ dcfg.b.devspd = val;
+ dwc_write_reg32(&_core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
+}
+
+
+/**
+ * This function calculates the number of IN EPS
+ * using GHWCFG1 and GHWCFG2 registers values
+ *
+ * @param _pcd the pcd structure.
+ */
+static uint32_t calc_num_in_eps(dwc_otg_core_if_t * _core_if)
+{
+ uint32_t num_in_eps = 0;
+ uint32_t num_eps = _core_if->hwcfg2.b.num_dev_ep;
+ uint32_t hwcfg1 = _core_if->hwcfg1.d32 >> 2;
+ uint32_t num_tx_fifos = _core_if->hwcfg4.b.num_in_eps;
+ int i;
+ for (i = 0; i < num_eps; ++i) {
+ if (!(hwcfg1 & 0x1))
+ num_in_eps++;
+ hwcfg1 >>= 2;
+ }
+ if (_core_if->hwcfg4.b.ded_fifo_en) {
+ num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps;
+ }
+ return num_in_eps;
+}
+
+
+/**
+ * This function calculates the number of OUT EPS
+ * using GHWCFG1 and GHWCFG2 registers values
+ *
+ * @param _pcd the pcd structure.
+ */
+static uint32_t calc_num_out_eps(dwc_otg_core_if_t * _core_if)
+{
+ uint32_t num_out_eps = 0;
+ uint32_t num_eps = _core_if->hwcfg2.b.num_dev_ep;
+ uint32_t hwcfg1 = _core_if->hwcfg1.d32 >> 2;
+ int i;
+ for (i = 0; i < num_eps; ++i) {
+ if (!(hwcfg1 & 0x2))
+ num_out_eps++;
+ hwcfg1 >>= 2;
+ }
+ return num_out_eps;
+}
+
+
+/**
+ * This function initializes the DWC_otg controller registers and
+ * prepares the core for device mode or host mode operation.
+ *
+ * @param _core_if Programming view of the DWC_otg controller
+ *
+ */
+void dwc_otg_core_init(dwc_otg_core_if_t * _core_if)
+{
+ int i = 0;
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ dwc_otg_dev_if_t * dev_if = _core_if->dev_if;
+ gahbcfg_data_t ahbcfg = {.d32 = 0};
+ gusbcfg_data_t usbcfg = {.d32 = 0};
+ gi2cctl_data_t i2cctl = {.d32 = 0};
+ DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", _core_if);
+
+ /* Common Initialization */
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ DWC_DEBUGPL(DBG_CIL, "USB config register: 0x%08x\n", usbcfg.d32);
+
+ /* Program the ULPI External VBUS bit if needed */
+#if defined(OTG_EXT_CHG_PUMP) || defined(CONFIG_460EX) || defined(CONFIG_APM82181)
+ usbcfg.b.ulpi_ext_vbus_drv = 1;
+#else
+ //usbcfg.b.ulpi_ext_vbus_drv = 0;
+ usbcfg.b.ulpi_ext_vbus_drv =
+ (_core_if->core_params->phy_ulpi_ext_vbus ==
+ DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
+#endif
+
+ /* Set external TS Dline pulsing */
+ usbcfg.b.term_sel_dl_pulse = (_core_if->core_params->ts_dline == 1) ? 1 : 0;
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+ /* Reset the Controller */
+ dwc_otg_core_reset(_core_if);
+
+ /* Initialize parameters from Hardware configuration registers. */
+ dev_if->num_in_eps = calc_num_in_eps(_core_if);
+ dev_if->num_out_eps = calc_num_out_eps(_core_if);
+ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",
+ _core_if->hwcfg4.b.num_dev_perio_in_ep);
+ DWC_DEBUGPL(DBG_CIL, "Is power optimization enabled? %s\n",
+ _core_if->hwcfg4.b.power_optimiz ? "Yes" : "No");
+ DWC_DEBUGPL(DBG_CIL, "vbus_valid filter enabled? %s\n",
+ _core_if->hwcfg4.b.vbus_valid_filt_en ? "Yes" : "No");
+ DWC_DEBUGPL(DBG_CIL, "iddig filter enabled? %s\n",
+ _core_if->hwcfg4.b.iddig_filt_en ? "Yes" : "No");
+
+ for (i = 0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+ dev_if->perio_tx_fifo_size[i] =
+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
+ DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", i,
+ dev_if->perio_tx_fifo_size[i]);
+ }
+ for (i = 0; i < _core_if->hwcfg4.b.num_in_eps; i++) {
+ dev_if->tx_fifo_size[i] =
+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
+ DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", i,
+ dev_if->perio_tx_fifo_size[i]);
+ }
+ _core_if->total_fifo_size = _core_if->hwcfg3.b.dfifo_depth;
+ _core_if->rx_fifo_size = dwc_read_reg32(&global_regs->grxfsiz);
+ _core_if->nperio_tx_fifo_size = dwc_read_reg32(&global_regs->gnptxfsiz) >> 16;
+ DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", _core_if->total_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", _core_if->rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n",_core_if->nperio_tx_fifo_size);
+
+ /* This programming sequence needs to happen in FS mode before any other
+ * programming occurs */
+ if ((_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) &&
+ (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
+
+ /* If FS mode with FS PHY */
+
+ /* core_init() is now called on every switch so only call the
+ * following for the first time through.
+ */
+ if (!_core_if->phy_init_done) {
+ _core_if->phy_init_done = 1;
+ DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ usbcfg.b.physel = 1;
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+ /* Reset after a PHY select */
+ dwc_otg_core_reset(_core_if);
+ }
+
+ /* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
+ * do this on HNP Dev/Host mode switches (done in dev_init and
+ * host_init).
+ */
+ if (dwc_otg_is_host_mode(_core_if)) {
+ DWC_DEBUGPL(DBG_CIL, "host mode\n");
+ init_fslspclksel(_core_if);
+ } else {
+ DWC_DEBUGPL(DBG_CIL, "device mode\n");
+ init_devspd(_core_if);
+ }
+
+ if (_core_if->core_params->i2c_enable) {
+ DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
+
+ /* Program GUSBCFG.OtgUtmifsSel to I2C */
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ usbcfg.b.otgutmifssel = 1;
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+ /* Program GI2CCTL.I2CEn */
+ i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl);
+ i2cctl.b.i2cdevaddr = 1;
+ i2cctl.b.i2cen = 0;
+ dwc_write_reg32(&global_regs->gi2cctl, i2cctl.d32);
+ i2cctl.b.i2cen = 1;
+ dwc_write_reg32(&global_regs->gi2cctl, i2cctl.d32);
+ }
+ } /* endif speed == DWC_SPEED_PARAM_FULL */
+ else {
+ /* High speed PHY. */
+ if (!_core_if->phy_init_done) {
+ _core_if->phy_init_done = 1;
+ DWC_DEBUGPL(DBG_CIL, "High spped PHY\n");
+ /* HS PHY parameters. These parameters are preserved
+ * during soft reset so only program the first time. Do
+ * a soft reset immediately after setting phyif.
+ */
+ // test-only: in AMCC 460EX code not used!!!???
+ usbcfg.b.ulpi_utmi_sel = _core_if->core_params->phy_type;
+ if (usbcfg.b.ulpi_utmi_sel == 1) {
+ DWC_DEBUGPL(DBG_CIL, "ULPI\n");
+ /* ULPI interface */
+ usbcfg.b.phyif = 0;
+ usbcfg.b.ddrsel = _core_if->core_params->phy_ulpi_ddr;
+ } else {
+ /* UTMI+ interface */
+ if (_core_if->core_params->phy_utmi_width == 16) {
+ usbcfg.b.phyif = 1;
+ DWC_DEBUGPL(DBG_CIL, "UTMI+ 16\n");
+ } else {
+ DWC_DEBUGPL(DBG_CIL, "UTMI+ 8\n");
+ usbcfg.b.phyif = 0;
+ }
+ }
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+ /* Reset after setting the PHY parameters */
+ dwc_otg_core_reset(_core_if);
+ }
+ }
+ if ((_core_if->hwcfg2.b.hs_phy_type == 2) &&
+ (_core_if->hwcfg2.b.fs_phy_type == 1) &&
+ (_core_if->core_params->ulpi_fs_ls)) {
+ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ usbcfg.b.ulpi_fsls = 1;
+ usbcfg.b.ulpi_clk_sus_m = 1;
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+ } else {
+ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS=0\n");
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ usbcfg.b.ulpi_fsls = 0;
+ usbcfg.b.ulpi_clk_sus_m = 0;
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+ }
+
+ /* Program the GAHBCFG Register. */
+ switch (_core_if->hwcfg2.b.architecture) {
+ case DWC_SLAVE_ONLY_ARCH:
+ DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
+ ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+ ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+ _core_if->dma_enable = 0;
+ break;
+ case DWC_EXT_DMA_ARCH:
+ DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n");
+ ahbcfg.b.hburstlen = _core_if->core_params->dma_burst_size;
+ _core_if->dma_enable = (_core_if->core_params->dma_enable != 0);
+ break;
+ case DWC_INT_DMA_ARCH:
+ DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
+ #if defined(CONFIG_APM82181)
+ /* Avoid system hang during concurrently using USB and SATA */
+ ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR16;
+ #else
+ ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR;
+ #endif
+ _core_if->dma_enable = (_core_if->core_params->dma_enable != 0);
+ break;
+ }
+ ahbcfg.b.dmaenable = _core_if->dma_enable;
+ dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32);
+ _core_if->en_multiple_tx_fifo = _core_if->hwcfg4.b.ded_fifo_en;
+
+ /*
+ * Program the GUSBCFG register.
+ */
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ switch (_core_if->hwcfg2.b.op_mode) {
+ case DWC_MODE_HNP_SRP_CAPABLE:
+ usbcfg.b.hnpcap = (_core_if->core_params->otg_cap ==
+ DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
+ usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+ break;
+ case DWC_MODE_SRP_ONLY_CAPABLE:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+ break;
+ case DWC_MODE_NO_HNP_SRP_CAPABLE:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = 0;
+ break;
+ case DWC_MODE_SRP_CAPABLE_DEVICE:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+ break;
+ case DWC_MODE_NO_SRP_CAPABLE_DEVICE:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = 0;
+ break;
+ case DWC_MODE_SRP_CAPABLE_HOST:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+ break;
+ case DWC_MODE_NO_SRP_CAPABLE_HOST:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = 0;
+ break;
+ }
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+ /* Enable common interrupts */
+ dwc_otg_enable_common_interrupts(_core_if);
+
+ /* Do device or host intialization based on mode during PCD
+ * and HCD initialization
+ */
+ if (dwc_otg_is_host_mode(_core_if)) {
+ DWC_DEBUGPL(DBG_ANY, "Host Mode\n");
+ _core_if->op_state = A_HOST;
+ } else {
+ DWC_DEBUGPL(DBG_ANY, "Device Mode\n");
+ _core_if->op_state = B_PERIPHERAL;
+#ifdef CONFIG_DWC_DEVICE_ONLY
+ dwc_otg_core_dev_init(_core_if);
+#endif /* */
+ }
+}
+
+
+/**
+ * This function enables the Device mode interrupts.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t * _core_if)
+{
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+ /* Disable all interrupts. */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* Clear any pending interrupts */
+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+ /* Enable the common interrupts */
+ dwc_otg_enable_common_interrupts(_core_if);
+
+ /* Enable interrupts */
+ intr_mask.b.usbreset = 1;
+ intr_mask.b.enumdone = 1;
+ intr_mask.b.inepintr = 1;
+ intr_mask.b.outepintr = 1;
+ intr_mask.b.erlysuspend = 1;
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.epmismatch = 1;
+ }
+
+ /** @todo NGS: Should this be a module parameter? */
+#ifdef USE_PERIODIC_EP
+ intr_mask.b.isooutdrop = 1;
+ intr_mask.b.eopframe = 1;
+ intr_mask.b.incomplisoin = 1;
+ intr_mask.b.incomplisoout = 1;
+#endif /* */
+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32,
+ intr_mask.d32);
+
+ DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
+ dwc_read_reg32(&global_regs->gintmsk));
+}
+
+
+/**
+ * This function initializes the DWC_otg controller registers for
+ * device mode.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ *
+ */
+void dwc_otg_core_dev_init(dwc_otg_core_if_t * _core_if)
+{
+ int i;
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ dwc_otg_dev_if_t * dev_if = _core_if->dev_if;
+ dwc_otg_core_params_t * params = _core_if->core_params;
+ dcfg_data_t dcfg = {.d32 = 0};
+ grstctl_t resetctl = {.d32 = 0};
+ uint32_t rx_fifo_size;
+ fifosize_data_t nptxfifosize;
+ fifosize_data_t txfifosize;
+ dthrctl_data_t dthrctl;
+ fifosize_data_t ptxfifosize;
+
+ /* Restart the Phy Clock */
+ dwc_write_reg32(_core_if->pcgcctl, 0);
+
+ /* Device configuration register */
+ init_devspd(_core_if);
+ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
+ dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80;
+ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+ /* Configure data FIFO sizes */
+ if (_core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
+ _core_if->total_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n",
+ params->dev_rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",
+ params->dev_nperio_tx_fifo_size);
+
+ /* Rx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->grxfsiz));
+ rx_fifo_size = params->dev_rx_fifo_size;
+ dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->grxfsiz));
+
+ /** Set Periodic Tx FIFO Mask all bits 0 */
+ _core_if->p_tx_msk = 0;
+
+ /** Set Tx FIFO Mask all bits 0 */
+ _core_if->tx_msk = 0;
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ /* Non-periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->gnptxfsiz));
+ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
+ nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
+ dwc_write_reg32(&global_regs->gnptxfsiz,nptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->gnptxfsiz));
+
+ /**@todo NGS: Fix Periodic FIFO Sizing! */
+ /*
+ * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15.
+ * Indexes of the FIFO size module parameters in the
+ * dev_perio_tx_fifo_size array and the FIFO size registers in
+ * the dptxfsiz array run from 0 to 14.
+ */
+ /** @todo Finish debug of this */
+ ptxfifosize.b.startaddr =
+ nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+ for (i = 0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep;i++) {
+ ptxfifosize.b.depth = params->dev_perio_tx_fifo_size[i];
+ DWC_DEBUGPL(DBG_CIL,"initial dptxfsiz_dieptxf[%d]=%08x\n",
+ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
+ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i],ptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL,"new dptxfsiz_dieptxf[%d]=%08x\n",
+ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
+ ptxfifosize.b.startaddr += ptxfifosize.b.depth;
+ }
+ } else {
+
+ /*
+ * Tx FIFOs These FIFOs are numbered from 1 to 15.
+ * Indexes of the FIFO size module parameters in the
+ * dev_tx_fifo_size array and the FIFO size registers in
+ * the dptxfsiz_dieptxf array run from 0 to 14.
+ */
+
+ /* Non-periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->gnptxfsiz));
+ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
+ nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->gnptxfsiz));
+ txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+ for (i = 1;i < _core_if->hwcfg4.b.num_dev_perio_in_ep;i++) {
+ txfifosize.b.depth = params->dev_tx_fifo_size[i];
+ DWC_DEBUGPL(DBG_CIL,"initial dptxfsiz_dieptxf[%d]=%08x\n",
+ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
+ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i - 1],txfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL,"new dptxfsiz_dieptxf[%d]=%08x\n",
+ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i-1]));
+ txfifosize.b.startaddr += txfifosize.b.depth;
+ }
+ }
+ }
+
+ /* Flush the FIFOs */
+ dwc_otg_flush_tx_fifo(_core_if, 0x10); /* all Tx FIFOs */
+ dwc_otg_flush_rx_fifo(_core_if);
+
+ /* Flush the Learning Queue. */
+ resetctl.b.intknqflsh = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->grstctl, resetctl.d32);
+
+ /* Clear all pending Device Interrupts */
+ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0);
+ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0);
+ dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF);
+ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0);
+ for (i = 0; i <= dev_if->num_in_eps; i++) {
+ depctl_data_t depctl;
+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
+ if (depctl.b.epena) {
+ depctl.d32 = 0;
+ depctl.b.epdis = 1;
+ depctl.b.snak = 1;
+ } else {
+ depctl.d32 = 0;
+ }
+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
+ dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0);
+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
+ }
+ for (i = 0; i <= dev_if->num_out_eps; i++) {
+ depctl_data_t depctl;
+ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
+ if (depctl.b.epena) {
+ depctl.d32 = 0;
+ depctl.b.epdis = 1;
+ depctl.b.snak = 1;
+ } else {
+ depctl.d32 = 0;
+ }
+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32);
+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0);
+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0);
+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF);
+ }
+ if (_core_if->en_multiple_tx_fifo && _core_if->dma_enable) {
+ dev_if->non_iso_tx_thr_en = _core_if->core_params->thr_ctl & 0x1;
+ dev_if->iso_tx_thr_en = (_core_if->core_params->thr_ctl >> 1) & 0x1;
+ dev_if->rx_thr_en = (_core_if->core_params->thr_ctl >> 2) & 0x1;
+ dev_if->rx_thr_length = _core_if->core_params->rx_thr_length;
+ dev_if->tx_thr_length = _core_if->core_params->tx_thr_length;
+ dthrctl.d32 = 0;
+ dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
+ dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
+ dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
+ dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
+ dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
+ dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl,dthrctl.d32);
+ DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\n"
+ "Rx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
+ dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en,
+ dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len,
+ dthrctl.b.rx_thr_len);
+ }
+ dwc_otg_enable_device_interrupts(_core_if);
+ {
+ diepmsk_data_t msk = {.d32 = 0};
+ msk.b.txfifoundrn = 1;
+ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32,msk.d32);
+ }
+}
+
+
+/**
+ * This function enables the Host mode interrupts.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t * _core_if)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+ /* Disable all interrupts. */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* Clear any pending interrupts. */
+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+ /* Enable the common interrupts */
+ dwc_otg_enable_common_interrupts(_core_if);
+
+ /*
+ * Enable host mode interrupts without disturbing common
+ * interrupts.
+ */
+ intr_mask.b.sofintr = 1;
+ intr_mask.b.portintr = 1;
+ intr_mask.b.hcintr = 1;
+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+}
+
+/**
+ * This function disables the Host Mode interrupts.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t * _core_if)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__);
+
+ /*
+ * Disable host mode interrupts without disturbing common
+ * interrupts.
+ */
+ intr_mask.b.sofintr = 1;
+ intr_mask.b.portintr = 1;
+ intr_mask.b.hcintr = 1;
+ intr_mask.b.ptxfempty = 1;
+ intr_mask.b.nptxfempty = 1;
+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
+}
+
+#if 0
+/* currently not used, keep it here as if needed later */
+/* Reads one USB PHY register via GPVNDCTL (vendor control),
+ * polling up to ~10ms for completion; always returns 0. */
+static int phy_read(dwc_otg_core_if_t * _core_if, int addr)
+{
+ u32 val;
+ int timeout = 10;
+
+ dwc_write_reg32(&_core_if->core_global_regs->gpvndctl,
+ 0x02000000 | (addr << 16));
+ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+ while (((val & 0x08000000) == 0) && (timeout--)) {
+ udelay(1000);
+ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+ }
+ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+ printk("%s: addr=%02x regval=%02x\n", __func__, addr, val & 0x000000ff);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_405EX
+/* Writes val8 to one USB PHY register via GPVNDCTL (vendor control),
+ * polling up to ~10ms for completion; always returns 0 (the final
+ * readback is not checked). */
+static int phy_write(dwc_otg_core_if_t * _core_if, int addr, int val8)
+{
+ u32 val;
+ int timeout = 10;
+
+ dwc_write_reg32(&_core_if->core_global_regs->gpvndctl,
+ 0x02000000 | 0x00400000 | (addr << 16) | (val8 & 0x000000ff));
+ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+ while (((val & 0x08000000) == 0) && (timeout--)) {
+ udelay(1000);
+ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+ }
+ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+
+ return 0;
+}
+#endif
+
+/**
+ * This function initializes the DWC_otg controller registers for
+ * host mode.
+ *
+ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the
+ * request queues. Host channels are reset to ensure that they are ready for
+ * performing transfers.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ *
+ */
+void dwc_otg_core_host_init(dwc_otg_core_if_t * _core_if)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ dwc_otg_host_if_t * host_if = _core_if->host_if;
+ dwc_otg_core_params_t * params = _core_if->core_params;
+ hprt0_data_t hprt0 = {.d32 = 0};
+ fifosize_data_t nptxfifosize;
+ fifosize_data_t ptxfifosize;
+ int i;
+ hcchar_data_t hcchar;
+ hcfg_data_t hcfg;
+ dwc_otg_hc_regs_t * hc_regs;
+ int num_channels;
+ gotgctl_data_t gotgctl = {.d32 = 0};
+ DWC_DEBUGPL(DBG_CILV, "%s(%p)\n", __func__, _core_if);
+
+ /* Restart the Phy Clock */
+ dwc_write_reg32(_core_if->pcgcctl, 0);
+
+ /* Initialize Host Configuration Register */
+ init_fslspclksel(_core_if);
+ if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
+ hcfg.b.fslssupp = 1;
+ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
+ }
+
+ /* Configure data FIFO sizes */
+ if (_core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", _core_if->total_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->host_rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",params->host_nperio_tx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size);
+
+ /* Rx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",dwc_read_reg32(&global_regs->grxfsiz));
+ dwc_write_reg32(&global_regs->grxfsiz,params->host_rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",dwc_read_reg32(&global_regs->grxfsiz));
+
+ /* Non-periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",dwc_read_reg32(&global_regs->gnptxfsiz));
+ nptxfifosize.b.depth = params->host_nperio_tx_fifo_size;
+ nptxfifosize.b.startaddr = params->host_rx_fifo_size;
+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
+
+ /* Periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial hptxfsiz=%08x\n",dwc_read_reg32(&global_regs->hptxfsiz));
+ ptxfifosize.b.depth = params->host_perio_tx_fifo_size;
+ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+ dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL, "new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
+ }
+
+ /* Clear Host Set HNP Enable in the OTG Control Register */
+ gotgctl.b.hstsethnpen = 1;
+ dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
+
+ /* Make sure the FIFOs are flushed. */
+ dwc_otg_flush_tx_fifo(_core_if, 0x10 /* all Tx FIFOs */ );
+ dwc_otg_flush_rx_fifo(_core_if);
+
+ /* Flush out any leftover queued requests. */
+ num_channels = _core_if->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ hc_regs = _core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 0;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ }
+
+ /* Halt all channels to put them into a known state. */
+ for (i = 0; i < num_channels; i++) {
+ int count = 0;
+ hc_regs = _core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
+
+ do {
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (++count > 200) {
+ DWC_ERROR
+ ("%s: Unable to clear halt on channel %d\n",
+ __func__, i);
+ break;
+ }
+ udelay(100);
+ } while (hcchar.b.chen);
+ }
+
+ /* Turn on the vbus power. */
+ DWC_PRINT("Init: Port Power? op_state=%d\n", _core_if->op_state);
+ if (_core_if->op_state == A_HOST) {
+ hprt0.d32 = dwc_otg_read_hprt0(_core_if);
+ DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr);
+ if (hprt0.b.prtpwr == 0) {
+ hprt0.b.prtpwr = 1;
+ dwc_write_reg32(host_if->hprt0, hprt0.d32);
+ }
+ }
+
+#ifdef CONFIG_405EX
+ /* Write 0x60 to USB PHY register 7:
+ * Modify "Indicator Complement" and "Indicator Pass Thru" of
+ * Interface control register to disable the internal Vbus
+ * comparator, as suggested by RichTek FAE.
+ * This produced better results recognizing and mounting USB
+ * memory sticks on the Makalu 405EX platform. I couldn't see
+ * any difference on Kilauea, but since it seems to be better
+ * on Makalu, let's keep it in here too.
+ */
+ phy_write(_core_if, 7, 0x60);
+#endif
+
+ dwc_otg_enable_host_interrupts(_core_if);
+}
+
+
+/**
+ * Prepares a host channel for transferring packets to/from a specific
+ * endpoint. The HCCHARn register is set up with the characteristics specified
+ * in _hc. Host channel interrupts that may need to be serviced while this
+ * transfer is in progress are enabled.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ * @param _hc Information needed to initialize the host channel
+ */
+void dwc_otg_hc_init(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+ uint32_t intr_enable;
+ hcintmsk_data_t hc_intr_mask;
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+ uint8_t hc_num = _hc->hc_num;
+ dwc_otg_host_if_t * host_if = _core_if->host_if;
+ dwc_otg_hc_regs_t * hc_regs = host_if->hc_regs[hc_num];
+
+ /* Clear old interrupt conditions for this host channel. */
+ hc_intr_mask.d32 = 0xFFFFFFFF;
+ hc_intr_mask.b.reserved = 0;
+ dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32);
+
+ /* Enable channel interrupts required for this transfer. */
+ hc_intr_mask.d32 = 0;
+ hc_intr_mask.b.chhltd = 1;
+ if (_core_if->dma_enable) {
+ hc_intr_mask.b.ahberr = 1;
+ if (_hc->error_state && !_hc->do_split &&
+ _hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
+ hc_intr_mask.b.ack = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.datatglerr = 1;
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_INTR) {
+ hc_intr_mask.b.nak = 1;
+ }
+ }
+ }
+ } else {
+ switch (_hc->ep_type) {
+ case DWC_OTG_EP_TYPE_CONTROL:
+ case DWC_OTG_EP_TYPE_BULK:
+ hc_intr_mask.b.xfercompl = 1;
+ hc_intr_mask.b.stall = 1;
+ hc_intr_mask.b.xacterr = 1;
+ hc_intr_mask.b.datatglerr = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.bblerr = 1;
+ } else {
+ hc_intr_mask.b.nak = 1;
+ hc_intr_mask.b.nyet = 1;
+ if (_hc->do_ping) {
+ hc_intr_mask.b.ack = 1;
+ }
+ }
+ if (_hc->do_split) {
+ hc_intr_mask.b.nak = 1;
+ if (_hc->complete_split) {
+ hc_intr_mask.b.nyet = 1;
+ } else {
+ hc_intr_mask.b.ack = 1;
+ }
+ }
+ if (_hc->error_state) {
+ hc_intr_mask.b.ack = 1;
+ }
+ break;
+ case DWC_OTG_EP_TYPE_INTR:
+ hc_intr_mask.b.xfercompl = 1;
+ hc_intr_mask.b.nak = 1;
+ hc_intr_mask.b.stall = 1;
+ hc_intr_mask.b.xacterr = 1;
+ hc_intr_mask.b.datatglerr = 1;
+ hc_intr_mask.b.frmovrun = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.bblerr = 1;
+ }
+ if (_hc->error_state) {
+ hc_intr_mask.b.ack = 1;
+ }
+ if (_hc->do_split) {
+ if (_hc->complete_split) {
+ hc_intr_mask.b.nyet = 1;
+ } else {
+ hc_intr_mask.b.ack = 1;
+ }
+ }
+ break;
+ case DWC_OTG_EP_TYPE_ISOC:
+ hc_intr_mask.b.xfercompl = 1;
+ hc_intr_mask.b.frmovrun = 1;
+ hc_intr_mask.b.ack = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.xacterr = 1;
+ hc_intr_mask.b.bblerr = 1;
+ }
+ break;
+ }
+ }
+ dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32);
+
+ /* Enable the top level host channel interrupt. */
+ intr_enable = (1 << hc_num);
+ dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
+
+ /* Make sure host channel interrupts are enabled. */
+ gintmsk.b.hcintr = 1;
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
+
+ /*
+ * Program the HCCHARn register with the endpoint characteristics for
+ * the current transfer.
+ */
+ hcchar.d32 = 0;
+ hcchar.b.devaddr = _hc->dev_addr;
+ hcchar.b.epnum = _hc->ep_num;
+ hcchar.b.epdir = _hc->ep_is_in;
+ hcchar.b.lspddev = (_hc->speed == DWC_OTG_EP_SPEED_LOW);
+ hcchar.b.eptype = _hc->ep_type;
+ hcchar.b.mps = _hc->max_packet;
+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr);
+ DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum);
+ DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir);
+ DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev);
+ DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype);
+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
+ DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt);
+
+ /*
+ * Program the HCSPLIT register for SPLITs
+ */
+ hcsplt.d32 = 0;
+ if (_hc->do_split) {
+ DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n",
+ _hc->hc_num,_hc->complete_split ? "CSPLIT" : "SSPLIT");
+ hcsplt.b.compsplt = _hc->complete_split;
+ hcsplt.b.xactpos = _hc->xact_pos;
+ hcsplt.b.hubaddr = _hc->hub_addr;
+ hcsplt.b.prtaddr = _hc->port_addr;
+ DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", _hc->complete_split);
+ DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", _hc->xact_pos);
+ DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", _hc->hub_addr);
+ DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", _hc->port_addr);
+ DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", _hc->ep_is_in);
+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
+ DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", _hc->xfer_len);
+ }
+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
+}
+
+
+/**
+ * Attempts to halt a host channel. This function should only be called in
+ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
+ * normal circumstances in DMA mode, the controller halts the channel when the
+ * transfer is complete or a condition occurs that requires application
+ * intervention.
+ *
+ * In slave mode, checks for a free request queue entry, then sets the Channel
+ * Enable and Channel Disable bits of the Host Channel Characteristics
+ * register of the specified channel to initiate the halt. If there is no free
+ * request queue entry, sets only the Channel Disable bit of the HCCHARn
+ * register to flush requests for this channel. In the latter case, sets a
+ * flag to indicate that the host channel needs to be halted when a request
+ * queue slot is open.
+ *
+ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
+ * HCCHARn register. The controller ensures there is space in the request
+ * queue before submitting the halt request.
+ *
+ * Some time may elapse before the core flushes any posted requests for this
+ * host channel and halts. The Channel Halted interrupt handler completes the
+ * deactivation of the host channel.
+ *
+ * @param _core_if Controller register interface.
+ * @param _hc Host channel to halt.
+ * @param _halt_status Reason for halting the channel.
+ */
+void dwc_otg_hc_halt(dwc_otg_core_if_t * _core_if,
+		dwc_hc_t * _hc, dwc_otg_halt_status_e _halt_status)
+{
+	gnptxsts_data_t nptxsts;
+	hptxsts_data_t hptxsts;
+	hcchar_data_t hcchar;
+	dwc_otg_hc_regs_t * hc_regs;
+	dwc_otg_core_global_regs_t * global_regs;
+	dwc_otg_host_global_regs_t * host_global_regs;
+	hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+	global_regs = _core_if->core_global_regs;
+	host_global_regs = _core_if->host_if->host_global_regs;
+	WARN_ON(_halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS);
+	if (_halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
+			_halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
+
+		/*
+		 * Disable all channel interrupts except Ch Halted. The QTD
+		 * and QH state associated with this transfer has been cleared
+		 * (in the case of URB_DEQUEUE), so the channel needs to be
+		 * shut down carefully to prevent crashes.
+		 */
+		hcintmsk_data_t hcintmsk;
+		hcintmsk.d32 = 0;
+		hcintmsk.b.chhltd = 1;
+		dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32);
+
+		/*
+		 * Make sure no other interrupts besides halt are currently
+		 * pending. Handling another interrupt could cause a crash due
+		 * to the QTD and QH state.
+		 */
+		dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32);
+
+		/*
+		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
+		 * even if the channel was already halted for some other
+		 * reason.
+		 */
+		_hc->halt_status = _halt_status;
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+		if (hcchar.b.chen == 0) {
+			/*
+			 * The channel is either already halted or it hasn't
+			 * started yet. In DMA mode, the transfer may halt if
+			 * it finishes normally or a condition occurs that
+			 * requires driver intervention. Don't want to halt
+			 * the channel again. In either Slave or DMA mode,
+			 * it's possible that the transfer has been assigned
+			 * to a channel, but not started yet when an URB is
+			 * dequeued. Don't want to halt a channel that hasn't
+			 * started yet.
+			 */
+			return;
+		}
+	}
+	if (_hc->halt_pending) {
+
+		/*
+		 * A halt has already been issued for this channel. This might
+		 * happen when a transfer is aborted by a higher level in
+		 * the stack.
+		 */
+#ifdef CONFIG_DWC_DEBUG
+		DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n",
+				__func__, _hc->hc_num);
+/*		dwc_otg_dump_global_registers(_core_if); */
+/*		dwc_otg_dump_host_registers(_core_if); */
+#endif	/*  */
+		return;
+	}
+	/* Setting chen and chdis together requests the core to halt the channel. */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.chen = 1;
+	hcchar.b.chdis = 1;
+	if (!_core_if->dma_enable) {
+		/* Check for space in the request queue to issue the halt. */
+		if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL
+				|| _hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
+			nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+			if (nptxsts.b.nptxqspcavail == 0) {
+				/* No queue slot free: only flush; halt later on queue space. */
+				hcchar.b.chen = 0;
+			}
+		} else {
+			hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts);
+			if ((hptxsts.b.ptxqspcavail == 0) ||
+					(_core_if->queuing_high_bandwidth)) {
+				hcchar.b.chen = 0;
+			}
+		}
+	}
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+	_hc->halt_status = _halt_status;
+	if (hcchar.b.chen) {
+		/* Halt request was issued; Channel Halted IRQ will finish it. */
+		_hc->halt_pending = 1;
+		_hc->halt_on_queue = 0;
+	} else {
+		/* Couldn't issue yet; re-attempt when a request queue slot opens. */
+		_hc->halt_on_queue = 1;
+	}
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+	DWC_DEBUGPL(DBG_HCDV, "	 hcchar: 0x%08x\n", hcchar.d32);
+	DWC_DEBUGPL(DBG_HCDV, "	 halt_pending: %d\n", _hc->halt_pending);
+	DWC_DEBUGPL(DBG_HCDV, "	 halt_on_queue: %d\n", _hc->halt_on_queue);
+	DWC_DEBUGPL(DBG_HCDV, "	 halt_status: %d\n", _hc->halt_status);
+	return;
+}
+
+
+/**
+ * Clears the transfer state for a host channel. This function is normally
+ * called after a transfer is done and the host channel is being released.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Identifies the host channel to clean up.
+ */
+void dwc_otg_hc_cleanup(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+	dwc_otg_hc_regs_t * hc_regs;
+	_hc->xfer_started = 0;
+
+	/*
+	 * Clear channel interrupt enables and any unhandled channel interrupt
+	 * conditions.
+	 */
+	hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+	dwc_write_reg32(&hc_regs->hcintmsk, 0);
+	/* Writing all ones to HCINT clears every pending condition bit. */
+	dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF);
+
+#ifdef CONFIG_DWC_DEBUG
+	/* Cancel the debug transfer-timeout watchdog armed at start_transfer. */
+	del_timer(&_core_if->hc_xfer_timer[_hc->hc_num]);
+	{
+		hcchar_data_t hcchar;
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+		if (hcchar.b.chdis) {
+			DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+					__func__, _hc->hc_num, hcchar.d32);
+		}
+	}
+#endif	/*  */
+}
+
+
+/**
+ * Sets the channel property that indicates in which frame a periodic transfer
+ * should occur. This is always set to the _next_ frame. This function has no
+ * effect on non-periodic transfers.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Identifies the host channel to set up and its properties.
+ * @param _hcchar Current value of the HCCHAR register for the specified host
+ * channel.
+ */
+static inline void hc_set_even_odd_frame(dwc_otg_core_if_t * _core_if,
+		dwc_hc_t * _hc, hcchar_data_t * _hcchar)
+{
+	if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+			_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+		hfnum_data_t hfnum;
+		hfnum.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hfnum);
+
+		/* 1 if _next_ frame is odd, 0 if it's even */
+		_hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
+
+#ifdef CONFIG_DWC_DEBUG
+		/* Debug-only: sample which microframe (of 8) INTR SSPLITs start in. */
+		if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR && _hc->do_split
+				&& !_hc->complete_split) {
+			switch (hfnum.b.frnum & 0x7) {
+			case 7:
+				_core_if->hfnum_7_samples++;
+				_core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
+				break;
+			case 0:
+				_core_if->hfnum_0_samples++;
+				_core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
+				break;
+			default:
+				_core_if->hfnum_other_samples++;
+				_core_if->hfnum_other_frrem_accum +=
+					hfnum.b.frrem;
+				break;
+			}
+		}
+#endif	/*  */
+	}
+}
+
+#ifdef CONFIG_DWC_DEBUG
+/**
+ * Debug-only timer callback. Fires when a host-channel transfer has not
+ * completed within the watchdog period armed in dwc_otg_hc_start_transfer;
+ * logs the channel number and the HCCHAR value at transfer start.
+ *
+ * @param _ptr Pointer to the channel's hc_xfer_info_t, cast to unsigned long.
+ */
+static void hc_xfer_timeout(unsigned long _ptr)
+{
+	hc_xfer_info_t * xfer_info = (hc_xfer_info_t *) _ptr;
+	int hc_num = xfer_info->hc->hc_num;
+	DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num);
+	DWC_WARN("	start_hcchar_val 0x%08x\n",
+			xfer_info->core_if->start_hcchar_val[hc_num]);
+}
+#endif	/*  */
+
+/*
+ * This function does the setup for a data transfer for a host channel and
+ * starts the transfer. May be called in either Slave mode or DMA mode. In
+ * Slave mode, the caller must ensure that there is sufficient space in the
+ * request queue and Tx Data FIFO.
+ *
+ * For an OUT transfer in Slave mode, it loads a data packet into the
+ * appropriate FIFO. If necessary, additional data packets will be loaded in
+ * the Host ISR.
+ *
+ * For an IN transfer in Slave mode, a data packet is requested. The data
+ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
+ * additional data packets are requested in the Host ISR.
+ *
+ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
+ * register along with a packet count of 1 and the channel is enabled. This
+ * causes a single PING transaction to occur. Other fields in HCTSIZ are
+ * simply set to 0 since no data transfer occurs in this case.
+ *
+ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
+ * all the information required to perform the subsequent data transfer. In
+ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
+ * controller performs the entire PING protocol, then starts the data
+ * transfer.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Information needed to initialize the host channel. The xfer_len
+ * value may be reduced to accommodate the max widths of the XferSize and
+ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed
+ * to reflect the final xfer_len value.
+ */
+void dwc_otg_hc_start_transfer(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+	hcchar_data_t hcchar;
+	hctsiz_data_t hctsiz;
+	uint16_t num_packets;
+	uint32_t max_hc_xfer_size = _core_if->core_params->max_transfer_size;
+	uint16_t max_hc_pkt_count = _core_if->core_params->max_packet_count;
+	dwc_otg_hc_regs_t * hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+	hctsiz.d32 = 0;
+	if (_hc->do_ping) {
+		if (!_core_if->dma_enable) {
+			/* Slave mode: issue the PING directly and return. */
+			dwc_otg_hc_do_ping(_core_if, _hc);
+			_hc->xfer_started = 1;
+			return;
+		} else {
+			/* DMA mode: core performs PING protocol, then the transfer. */
+			hctsiz.b.dopng = 1;
+		}
+	}
+	if (_hc->do_split) {
+		num_packets = 1;
+		if (_hc->complete_split && !_hc->ep_is_in) {
+			/* For CSPLIT OUT Transfer, set the size to 0 so the
+			 * core doesn't expect any data written to the FIFO */
+			_hc->xfer_len = 0;
+		} else if (_hc->ep_is_in || (_hc->xfer_len > _hc->max_packet)) {
+			_hc->xfer_len = _hc->max_packet;
+		} else if (!_hc->ep_is_in && (_hc->xfer_len > 188)) {
+			/* 188 bytes is the maximum SSPLIT OUT payload per microframe. */
+			_hc->xfer_len = 188;
+		}
+		hctsiz.b.xfersize = _hc->xfer_len;
+	} else {
+		/*
+		 * Ensure that the transfer length and packet count will fit
+		 * in the widths allocated for them in the HCTSIZn register.
+		 */
+		if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR
+				|| _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+			/*
+			 * Make sure the transfer size is no larger than one
+			 * (micro)frame's worth of data. (A check was done
+			 * when the periodic transfer was accepted to ensure
+			 * that a (micro)frame's worth of data can be
+			 * programmed into a channel.)
+			 */
+			uint32_t max_periodic_len = _hc->multi_count * _hc->max_packet;
+			if (_hc->xfer_len > max_periodic_len) {
+				_hc->xfer_len = max_periodic_len;
+			} else {
+				/* xfer_len already fits in one (micro)frame; nothing to clamp. */
+			}
+		} else if (_hc->xfer_len > max_hc_xfer_size) {
+			/* Make sure that xfer_len is a multiple of max packet size. */
+			_hc->xfer_len = max_hc_xfer_size - _hc->max_packet + 1;
+		}
+		if (_hc->xfer_len > 0) {
+			num_packets = (_hc->xfer_len + _hc->max_packet - 1) / _hc->max_packet;
+			if (num_packets > max_hc_pkt_count) {
+				num_packets = max_hc_pkt_count;
+				_hc->xfer_len = num_packets * _hc->max_packet;
+			}
+		} else {
+			/* Need 1 packet for transfer length of 0. */
+			num_packets = 1;
+		}
+		if (_hc->ep_is_in) {
+			/* Always program an integral # of max packets for IN transfers. */
+			_hc->xfer_len = num_packets * _hc->max_packet;
+		}
+		if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR
+				|| _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+			/*
+			 * Make sure that the multi_count field matches the
+			 * actual transfer length.
+			 */
+			_hc->multi_count = num_packets;
+		}
+		if (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+			/* Set up the initial PID for the transfer. */
+			if (_hc->speed == DWC_OTG_EP_SPEED_HIGH) {
+				/* High-speed high-bandwidth ISOC: PID encodes packets per microframe. */
+				if (_hc->ep_is_in) {
+					if (_hc->multi_count == 1) {
+						_hc->data_pid_start =
+							DWC_OTG_HC_PID_DATA0;
+					} else if (_hc->multi_count == 2) {
+						_hc->data_pid_start =
+							DWC_OTG_HC_PID_DATA1;
+					} else {
+						_hc->data_pid_start =
+							DWC_OTG_HC_PID_DATA2;
+					}
+				} else {
+					if (_hc->multi_count == 1) {
+						_hc->data_pid_start =
+							DWC_OTG_HC_PID_DATA0;
+					} else {
+						_hc->data_pid_start =
+							DWC_OTG_HC_PID_MDATA;
+					}
+				}
+			} else {
+				_hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
+			}
+		}
+		hctsiz.b.xfersize = _hc->xfer_len;
+	}
+	_hc->start_pkt_count = num_packets;
+	hctsiz.b.pktcnt = num_packets;
+	hctsiz.b.pid = _hc->data_pid_start;
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+	DWC_DEBUGPL(DBG_HCDV, "	 Xfer Size: %d\n", hctsiz.b.xfersize);
+	DWC_DEBUGPL(DBG_HCDV, "	 Num Pkts: %d\n", hctsiz.b.pktcnt);
+	DWC_DEBUGPL(DBG_HCDV, "	 Start PID: %d\n", hctsiz.b.pid);
+	if (_core_if->dma_enable) {
+		/* NOTE(review): assumes 32-bit bus-addressable xfer_buff (ppc44x) — confirm. */
+		dwc_write_reg32(&hc_regs->hcdma, (uint32_t) _hc->xfer_buff);
+	}
+
+	/* Start the split */
+	if (_hc->do_split) {
+		hcsplt_data_t hcsplt;
+		hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
+		hcsplt.b.spltena = 1;
+		dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32);
+	}
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.multicnt = _hc->multi_count;
+	hc_set_even_odd_frame(_core_if, _hc, &hcchar);
+
+#ifdef CONFIG_DWC_DEBUG
+	_core_if->start_hcchar_val[_hc->hc_num] = hcchar.d32;
+	if (hcchar.b.chdis) {
+		DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+				__func__, _hc->hc_num, hcchar.d32);
+	}
+
+#endif	/*  */
+
+	/* Set host channel enable after all other setup is complete. */
+	hcchar.b.chen = 1;
+	hcchar.b.chdis = 0;
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+	_hc->xfer_started = 1;
+	_hc->requests++;
+	if (!_core_if->dma_enable && !_hc->ep_is_in && _hc->xfer_len > 0) {
+		/* Load OUT packet into the appropriate Tx FIFO. */
+		dwc_otg_hc_write_packet(_core_if, _hc);
+	}
+
+#ifdef CONFIG_DWC_DEBUG
+	/* Start a timer for this transfer; hc_xfer_timeout logs if it expires. */
+	_core_if->hc_xfer_timer[_hc->hc_num].function = hc_xfer_timeout;
+	_core_if->hc_xfer_info[_hc->hc_num].core_if = _core_if;
+	_core_if->hc_xfer_info[_hc->hc_num].hc = _hc;
+	_core_if->hc_xfer_timer[_hc->hc_num].data =
+		(unsigned long)(&_core_if->hc_xfer_info[_hc->hc_num]);
+	_core_if->hc_xfer_timer[_hc->hc_num].expires = jiffies + (HZ * 10);
+	add_timer(&_core_if->hc_xfer_timer[_hc->hc_num]);
+#endif	/*  */
+}
+
+/**
+ * This function continues a data transfer that was started by previous call
+ * to <code>dwc_otg_hc_start_transfer</code>. The caller must ensure there is
+ * sufficient space in the request queue and Tx Data FIFO. This function
+ * should only be called in Slave mode. In DMA mode, the controller acts
+ * autonomously to complete transfers programmed to a host channel.
+ *
+ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
+ * if there is any data remaining to be queued. For an IN transfer, another
+ * data packet is always requested. For the SETUP phase of a control transfer,
+ * this function does nothing.
+ *
+ * @return 1 if a new request is queued, 0 if no more requests are required
+ * for this transfer.
+ */
+int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+	if (_hc->do_split) {
+		/* SPLITs always queue just once per channel */
+		return 0;
+	} else if (_hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
+		/* SETUPs are queued only once since they can't be NAKed. */
+		return 0;
+	} else if (_hc->ep_is_in) {
+		/*
+		 * Always queue another request for other IN transfers. If
+		 * back-to-back INs are issued and NAKs are received for both,
+		 * the driver may still be processing the first NAK when the
+		 * second NAK is received. When the interrupt handler clears
+		 * the NAK interrupt for the first NAK, the second NAK will
+		 * not be seen. So we can't depend on the NAK interrupt
+		 * handler to requeue a NAKed request. Instead, IN requests
+		 * are issued each time this function is called. When the
+		 * transfer completes, the extra requests for the channel will
+		 * be flushed.
+		 */
+		hcchar_data_t hcchar;
+		dwc_otg_hc_regs_t * hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+		hc_set_even_odd_frame(_core_if, _hc, &hcchar);
+		hcchar.b.chen = 1;
+		hcchar.b.chdis = 0;
+		DWC_DEBUGPL(DBG_HCDV, "	 IN xfer: hcchar = 0x%08x\n", hcchar.d32);
+		dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+		_hc->requests++;
+		return 1;
+	} else {
+		/* OUT transfers. */
+		if (_hc->xfer_count < _hc->xfer_len) {
+			if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+					_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+				hcchar_data_t hcchar;
+				dwc_otg_hc_regs_t * hc_regs;
+				hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+				hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+				hc_set_even_odd_frame(_core_if, _hc, &hcchar);
+				/* NOTE(review): hcchar is updated here but never written
+				 * back to the register in this branch — confirm intended. */
+			}
+
+			/* Load OUT packet into the appropriate Tx FIFO. */
+			dwc_otg_hc_write_packet(_core_if, _hc);
+			_hc->requests++;
+			return 1;
+		} else {
+			return 0;
+		}
+	}
+}
+
+/**
+ * Starts a PING transfer. This function should only be called in Slave mode.
+ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Host channel on which to issue the single PING transaction.
+ */
+void dwc_otg_hc_do_ping(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+	hcchar_data_t hcchar;
+	hctsiz_data_t hctsiz;
+	dwc_otg_hc_regs_t * hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+	DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+	/* dopng + pktcnt of 1, all other HCTSIZ fields 0: no data phase. */
+	hctsiz.d32 = 0;
+	hctsiz.b.dopng = 1;
+	hctsiz.b.pktcnt = 1;
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.chen = 1;
+	hcchar.b.chdis = 0;
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+}
+
+
+#ifdef CONFIG_OTG_PLB_DMA /* PPC_PLB_DMA mode */
+/*
+ * This will dump the status of the dma registers -
+ * Only used in debug mode
+ */
+void ppc4xx_dump_dma(unsigned int dmanr)
+{
+	int index;
+
+	printk("%32s:\n", __FUNCTION__);
+	/* Each PLB-DMA channel occupies 8 consecutive DCRs starting at DCRN_DMACR0. */
+	for (index=0; index<=7; index++) {
+		printk("%32s dmanr=%d , 0x%x=0x%x\n",__FUNCTION__, dmanr ,
+			DCRN_DMACR0 + dmanr*8+index, mfdcr(DCRN_DMACR0 + dmanr*8 + index));
+	}
+	printk("%32s DCRN_DMASR=0x%x\n", __FUNCTION__, mfdcr(DCRN_DMASR));
+}
+
+/*
+ * This function programs the PLB-DMA engine to perform MEM-MEM transfer
+ * This is used to RD & WR from the DWC_FIFO by the PLB_DMA engine
+ *
+ * @param _core_if      Controller interface; supplies the FIFO physical base.
+ * @param src           Source buffer (virtual; FIFO-relative for RX direction).
+ * @param dst           Destination buffer (virtual; FIFO-relative for TX direction).
+ * @param length        Transfer length in bytes.
+ * @param use_interrupt Non-zero to enable the DMA completion interrupt.
+ * @param dma_ch        PLB-DMA channel number.
+ * @param dma_dir       OTG_TX_DMA (mem -> FIFO) or OTG_RX_DMA (FIFO -> mem).
+ */
+void ppc4xx_start_plb_dma(dwc_otg_core_if_t *_core_if, void *src, void *dst, unsigned int length,
+	unsigned int use_interrupt, unsigned int dma_ch, unsigned int dma_dir)
+{
+	int res = 0;
+	unsigned int control;
+	ppc_dma_ch_t p_init;
+
+	/* Fix: value/size arguments were swapped (memset(ptr, size, 0) zeroes
+	 * zero bytes), leaving p_init fields uninitialized. */
+	memset(&p_init, 0, sizeof(p_init));
+	p_init.polarity = 0;
+	p_init.pwidth = PW_32;
+	p_init.in_use = 0;
+	/* Address increment: TX walks the source, RX walks the destination;
+	 * the FIFO side stays fixed. */
+	if ( dma_dir == OTG_TX_DMA) {
+		p_init.sai = 1;
+		p_init.dai = 0;
+	} else if (dma_dir == OTG_RX_DMA) {
+		p_init.sai = 0;
+		p_init.dai = 1;
+	}
+	res = ppc4xx_init_dma_channel(dma_ch, &p_init);
+	if (res) {
+		printk("%32s: init_dma_channel return %d %d bytes dest %p\n",
+			__FUNCTION__, res, length, dst);
+	}
+	res = ppc4xx_clr_dma_status(dma_ch);
+	if (res) {
+		printk("%32s: ppc4xx_clr_dma_status %d\n", __FUNCTION__, res);
+	}
+
+	/* FIFO addresses are translated to bus addresses relative to the
+	 * controller's physical base; memory addresses via virt_to_bus. */
+	if (dma_dir == OTG_TX_DMA) {
+		ppc4xx_set_src_addr(dma_ch, virt_to_bus (src));
+		ppc4xx_set_dst_addr(dma_ch, (_core_if->phys_addr +
+			(dst - (void *)(_core_if->core_global_regs))) );
+	} else if (dma_dir == OTG_RX_DMA) {
+		ppc4xx_set_src_addr(dma_ch, (_core_if->phys_addr +
+			(src - (void *)(_core_if->core_global_regs))) );
+		ppc4xx_set_dst_addr(dma_ch, virt_to_bus (dst));
+	}
+
+	ppc4xx_set_dma_mode(dma_ch, DMA_MODE_MM);
+	ppc4xx_set_dma_count(dma_ch, length);
+
+	/* flush cache before enabling DMA transfer */
+	if (dma_dir == OTG_TX_DMA) {
+		flush_dcache_range((unsigned long)src,
+			(unsigned long)(src + length));
+	} else if (dma_dir == OTG_RX_DMA) {
+		flush_dcache_range((unsigned long)dst,
+			(unsigned long)(dst + length));
+	}
+
+	if (use_interrupt) {
+		res = ppc4xx_enable_dma_interrupt(dma_ch);
+	} else {
+		res = ppc4xx_disable_dma_interrupt(dma_ch);
+	}
+	if (res) {
+		printk("%32s: en/disable_dma_interrupt %d return %d per %d\n",
+			__FUNCTION__, use_interrupt, res,
+			ppc4xx_get_peripheral_width(dma_ch));
+	}
+
+	control = mfdcr(DCRN_DMACR0 + (dma_ch * 8));
+
+	/* Clear buffer-enable, pacing, and priority fields for plain MM mode. */
+	control &= ~(SET_DMA_BEN(1));
+	control &= ~(SET_DMA_PSC(3));
+	control &= ~(SET_DMA_PWC(0x3f));
+	control &= ~(SET_DMA_PHC(0x7));
+	control &= ~(SET_DMA_PL(1));
+
+	mtdcr(DCRN_DMACR0 + (dma_ch * 8), control);
+
+#ifdef OTG_PLB_DMA_DBG
+	ppc4xx_dump_dma(dma_ch);
+#endif
+	ppc4xx_enable_dma(dma_ch);
+}
+#endif
+
+/*
+ * This function writes a packet into the Tx FIFO associated with the Host
+ * Channel. For a channel associated with a non-periodic EP, the non-periodic
+ * Tx FIFO is written. For a channel associated with a periodic EP, the
+ * periodic Tx FIFO is written. This function should only be called in Slave
+ * mode.
+ *
+ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by
+ * the number of bytes written to the Tx FIFO.
+ */
+void dwc_otg_hc_write_packet(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+#ifndef CONFIG_OTG_PLB_DMA
+	uint32_t i;
+#endif
+	uint32_t remaining_count;
+	uint32_t byte_count;
+	uint32_t dword_count;
+	uint32_t * data_buff = (uint32_t *) (_hc->xfer_buff);
+	uint32_t * data_fifo = _core_if->data_fifo[_hc->hc_num];
+#if !defined( CONFIG_OTG_PLB_DMA_TASKLET) && defined(CONFIG_OTG_PLB_DMA)
+	uint32_t dma_sts = 0;
+#endif
+	/* Write at most one max-packet's worth of the remaining data. */
+	remaining_count = _hc->xfer_len - _hc->xfer_count;
+	if (remaining_count > _hc->max_packet) {
+		byte_count = _hc->max_packet;
+	} else {
+		byte_count = remaining_count;
+	}
+	/* FIFO is written in 32-bit words; round the byte count up. */
+	dword_count = (byte_count + 3) / 4;
+
+#ifdef CONFIG_OTG_PLB_DMA
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+
+	/* Small transfers are copied by the CPU; larger ones are deferred to
+	 * the PLB-DMA tasklet with interrupts masked until it completes. */
+	if ( _hc->xfer_len < USB_BUFSIZ) {
+		int i;
+		if ((((unsigned long)data_buff) & 0x3) == 0) {
+			/* xfer_buff is DWORD aligned. */
+			for (i = 0; i < dword_count; i++, data_buff++) {
+				dwc_write_datafifo32(data_fifo, *data_buff);
+			}
+		} else {
+			/* xfer_buff is not DWORD aligned. */
+			for (i = 0; i < dword_count; i++, data_buff++) {
+				dwc_write_datafifo32(data_fifo, get_unaligned(data_buff));
+			}
+		}
+	} else {
+		DWC_DEBUGPL(DBG_SP, "%s set release_later %d\n", __func__, dword_count);
+		atomic_set(& release_later, 1);
+		//disable_irq_nosync(94);
+		dwc_otg_disable_global_interrupts(_core_if);
+
+		_core_if->dma_xfer.dma_data_buff = data_buff;
+		_core_if->dma_xfer.dma_data_fifo = (void *)data_fifo;
+		_core_if->dma_xfer.dma_count = dword_count;
+		_core_if->dma_xfer.dma_dir = OTG_TX_DMA;
+		tasklet_schedule(_core_if->plbdma_tasklet);
+	}
+#else	/* !CONFIG_OTG_PLB_DMA_TASKLET */
+	if ((((unsigned long)data_buff) & 0x3) == 0) {
+		/* call tx_dma - src,dest,len,intr */
+		ppc4xx_start_plb_dma(_core_if, (void *)data_buff, data_fifo,
+			(dword_count * 4), PLB_DMA_INT_DIS, PLB_DMA_CH, OTG_TX_DMA);
+	} else {
+		ppc4xx_start_plb_dma(_core_if, (void *)get_unaligned(data_buff),
+			data_fifo, (dword_count * 4), PLB_DMA_INT_DIS, PLB_DMA_CH, OTG_TX_DMA);
+	}
+
+	/* Busy-wait for the DMA channel to finish, then check/clear status. */
+	while (mfdcr(DCRN_DMACR0 + (PLB_DMA_CH*8)) & DMA_CE_ENABLE) {
+	}
+	dma_sts = (uint32_t)ppc4xx_get_dma_status();
+#ifdef OTG_PLB_DMA_DBG
+	if (!(dma_sts & DMA_CS0)) {
+		printk("Status (Terminal Count not occured) 0x%08x\n", mfdcr(DCRN_DMASR));
+	}
+#endif
+	if (dma_sts & DMA_CH0_ERR) {
+		printk("Status (Channel Error) 0x%08x\n", mfdcr(DCRN_DMASR));
+	}
+	ppc4xx_clr_dma_status(PLB_DMA_CH);
+#ifdef OTG_PLB_DMA_DBG
+	printk("%32s DMA Status =0x%08x\n", __FUNCTION__, mfdcr(DCRN_DMASR));	/* vj_dbg */
+#endif
+
+#endif	/* CONFIG_OTG_PLB_DMA_TASKLET */
+
+
+#else
+	if ((((unsigned long)data_buff) & 0x3) == 0) {
+		/* xfer_buff is DWORD aligned. */
+		for (i = 0; i < dword_count; i++, data_buff++) {
+			dwc_write_datafifo32(data_fifo, *data_buff);
+		}
+	} else {
+		/* xfer_buff is not DWORD aligned. */
+		for (i = 0; i < dword_count; i++, data_buff++) {
+			dwc_write_datafifo32(data_fifo, get_unaligned(data_buff));
+		}
+	}
+#endif
+	/* NOTE(review): in the tasklet path the copy may still be pending when
+	 * these counters are advanced — confirm ordering is safe. */
+	_hc->xfer_count += byte_count;
+	_hc->xfer_buff += byte_count;
+}
+
+/**
+ * Gets the current USB frame number. This is the frame number from the last
+ * SOF packet.
+ */
+uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t * _core_if)
+{
+	dsts_data_t dsts;
+	dsts.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dsts);
+	/* read current frame/microframe number from DSTS register */
+	/* NOTE(review): DSTS.soffn is the device-mode frame counter; host-mode
+	 * code typically reads HFNUM — confirm this is only called in device mode. */
+	return dsts.b.soffn;
+}
+
+
+/**
+ * This function reads a setup packet from the Rx FIFO into the destination
+ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl)
+ * Interrupt routine when a SETUP packet has been received in Slave mode.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _dest Destination buffer for packet data; must hold at least 8 bytes.
+ */
+void dwc_otg_read_setup_packet(dwc_otg_core_if_t * _core_if, uint32_t * _dest)
+{
+	/* Get the 8 bytes of a setup transaction data */
+
+	/* Pop 2 DWORDS off the receive data FIFO into memory */
+	_dest[0] = dwc_read_datafifo32(_core_if->data_fifo[0]);
+	_dest[1] = dwc_read_datafifo32(_core_if->data_fifo[0]);
+}
+
+/**
+ * This function enables EP0 OUT to receive SETUP packets and configures EP0
+ * IN for transmitting packets. It is normally called when the
+ * "Enumeration Done" interrupt occurs.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP0 data.
+ */
+void dwc_otg_ep0_activate(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	dwc_otg_dev_if_t * dev_if = _core_if->dev_if;
+	dsts_data_t dsts;
+	depctl_data_t diepctl;
+	depctl_data_t doepctl;
+	dctl_data_t dctl = {.d32 = 0};
+
+	/* Read the Device Status and Endpoint 0 Control registers */
+	dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts);
+	diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl);
+	doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl);
+
+	/* Set the MPS of the IN EP based on the enumeration speed */
+	/* NOTE(review): no default case — diepctl.b.mps keeps its read value
+	 * for any other enumspd encoding; confirm that is intended. */
+	switch (dsts.b.enumspd) {
+	case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
+	case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
+	case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
+		diepctl.b.mps = DWC_DEP0CTL_MPS_64;
+		break;
+	case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
+		diepctl.b.mps = DWC_DEP0CTL_MPS_8;
+		break;
+	}
+	dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
+
+	/* Enable OUT EP for receive */
+	doepctl.b.epena = 1;
+	dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
+
+#ifdef VERBOSE
+	DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
+			dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
+	DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
+			dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
+
+#endif	/*  */
+	/* Clear the global non-periodic IN NAK so IN transfers can proceed. */
+	dctl.b.cgnpinnak = 1;
+	dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+	DWC_DEBUGPL(DBG_PCDV, "dctl=%0x\n",
+			dwc_read_reg32(&dev_if->dev_global_regs->dctl));
+}
+
+
+/**
+ * This function activates an EP. The Device EP control register for
+ * the EP is configured as defined in the ep structure. Note: This
+ * function is not used for EP0.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to activate.
+ */
+void dwc_otg_ep_activate(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	dwc_otg_dev_if_t * dev_if = _core_if->dev_if;
+	depctl_data_t depctl;
+	volatile uint32_t *addr;
+	daint_data_t daintmsk = {.d32 = 0};
+	DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, _ep->num,
+			(_ep->is_in ? "IN" : "OUT"));
+
+	/* Read DEPCTLn register */
+	if (_ep->is_in == 1) {
+		addr = &dev_if->in_ep_regs[_ep->num]->diepctl;
+		daintmsk.ep.in = 1 << _ep->num;
+	} else {
+		addr = &dev_if->out_ep_regs[_ep->num]->doepctl;
+		daintmsk.ep.out = 1 << _ep->num;
+	}
+
+	/* If the EP is already active don't change the EP Control
+	 * register.
+	 */
+	depctl.d32 = dwc_read_reg32(addr);
+	if (!depctl.b.usbactep) {
+		depctl.b.mps = _ep->maxpacket;
+		depctl.b.eptype = _ep->type;
+		depctl.b.txfnum = _ep->tx_fifo_num;
+		/* NOTE(review): both branches set setd0pid identically (the "???"
+		 * below is original); ISOC may need different PID handling — confirm. */
+		if (_ep->type == DWC_OTG_EP_TYPE_ISOC) {
+			depctl.b.setd0pid = 1;	// ???
+		} else {
+			depctl.b.setd0pid = 1;
+		}
+		depctl.b.usbactep = 1;
+		dwc_write_reg32(addr, depctl.d32);
+		DWC_DEBUGPL(DBG_PCDV, "DEPCTL=%08x\n", dwc_read_reg32(addr));
+	}
+
+	/* Enable the Interrupt for this EP */
+	dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk, 0,
+			daintmsk.d32);
+	DWC_DEBUGPL(DBG_PCDV, "DAINTMSK=%0x\n",
+			dwc_read_reg32(&dev_if->dev_global_regs->daintmsk));
+	_ep->stall_clear_flag = 0;
+	return;
+}
+
+
+/**
+ * This function deactivates an EP. This is done by clearing the USB Active
+ * EP bit in the Device EP control register. Note: This function is not used
+ * for EP0. EP0 cannot be deactivated.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to deactivate.
+ */
+void dwc_otg_ep_deactivate(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	depctl_data_t depctl = {.d32 = 0};
+	volatile uint32_t *addr;
+	daint_data_t daintmsk = {.d32 = 0};
+
+	/* Read DEPCTLn register */
+	if (_ep->is_in == 1) {
+		addr = &_core_if->dev_if->in_ep_regs[_ep->num]->diepctl;
+		daintmsk.ep.in = 1 << _ep->num;
+	} else {
+		addr = &_core_if->dev_if->out_ep_regs[_ep->num]->doepctl;
+		daintmsk.ep.out = 1 << _ep->num;
+	}
+	/* NOTE(review): depctl is zero-initialized and never read from the
+	 * register, so this write clears every DEPCTL field (mps, eptype, ...)
+	 * rather than only usbactep — confirm that is intended. */
+	depctl.b.usbactep = 0;
+	dwc_write_reg32(addr, depctl.d32);
+
+	/* Disable the Interrupt for this EP */
+	dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->daintmsk,
+			daintmsk.d32, 0);
+	return;
+}
+
+
+/**
+ * This function does the setup for a data transfer for an EP and
+ * starts the transfer. For an IN transfer, the packets will be
+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
+ * the packets are unloaded from the Rx FIFO in the ISR. the ISR.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to start the transfer on.
+ */
+void dwc_otg_ep_start_transfer(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+ /** @todo Refactor this funciton to check the transfer size
+ * count value does not execed the number bits in the Transfer
+ * count register. */
+ depctl_data_t depctl;
+ deptsiz_data_t deptsiz;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+
+#ifdef CHECK_PACKET_COUNTER_WIDTH
+ const uint32_t MAX_XFER_SIZE = _core_if->core_params->max_transfer_size;
+ const uint32_t MAX_PKT_COUNT = _core_if->core_params->max_packet_count;
+ uint32_t num_packets;
+ uint32_t transfer_len;
+ dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num];
+ dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[_ep->num];
+ gnptxsts_data_t txstatus;
+ int lvl = SET_DEBUG_LEVEL(DBG_PCD);
+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+ "xfer_buff=%p start_xfer_buff=%p\n", _ep->num,
+ (_ep->is_in ? "IN" : "OUT"), _ep->xfer_len,
+ _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff);
+ transfer_len = _ep->xfer_len - _ep->xfer_count;
+ if (transfer_len > MAX_XFER_SIZE) {
+ transfer_len = MAX_XFER_SIZE;
+ }
+ if (transfer_len == 0) {
+ num_packets = 1;
+
+ /* OUT EP to recieve Zero-length packet set transfer
+ * size to maxpacket size. */
+ if (!_ep->is_in) {
+ transfer_len = _ep->maxpacket;
+ }
+ } else {
+ num_packets = (transfer_len + _ep->maxpacket - 1) / _ep->maxpacket;
+ if (num_packets > MAX_PKT_COUNT) {
+ num_packets = MAX_PKT_COUNT;
+ }
+ }
+ DWC_DEBUGPL(DBG_PCD, "transfer_len=%d #pckt=%d\n", transfer_len,
+ num_packets);
+ deptsiz.b.xfersize = transfer_len;
+ deptsiz.b.pktcnt = num_packets;
+
+ /* IN endpoint */
+ if (_ep->is_in == 1) {
+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+ } /* OUT endpoint */
+ else {
+ depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
+ }
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+
+ /* IN endpoint */
+ if (_ep->is_in == 1) {
+ txstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+ if (txstatus.b.nptxqspcavail == 0) {
+ DWC_DEBUGPL(DBG_ANY, "TX Queue Full (0x%0x)\n",
+ txstatus.d32);
+ return;
+ }
+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+ dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt, the
+ * data will be written into the fifo by the ISR.
+ */
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&in_regs->diepdma, (uint32_t) _ep->xfer_buff);
+ } else {
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ dwc_modify_reg32(&_core_if->core_global_regs->gintsts,
+ intr_mask.d32, 0);
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk,
+ intr_mask.d32,intr_mask.d32);
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (_ep->xfer_len > 0 &&
+ _ep->type != DWC_OTG_EP_TYPE_ISOC) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk = (0x1 << _ep->num);
+ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->
+ dtknqr4_fifoemptymsk,0, fifoemptymsk);
+ }
+ }
+ }
+ } else {
+ /* OUT endpoint */
+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+ dwc_write_reg32(&out_regs->doepctl, depctl.d32);
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&out_regs->doepdma,(uint32_t) _ep->xfer_buff);
+ }
+ }
+ DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
+ dwc_read_reg32(&out_regs->doepctl),
+ dwc_read_reg32(&out_regs->doeptsiz));
+ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
+ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk),
+ dwc_read_reg32(&_core_if->core_global_regs->gintmsk));
+ SET_DEBUG_LEVEL(lvl);
+
+#endif /* */
+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+ "xfer_buff=%p start_xfer_buff=%p\n", _ep->num,
+ (_ep->is_in ? "IN" : "OUT"), _ep->xfer_len,
+ _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff);
+
+ /* IN endpoint */
+ if (_ep->is_in == 1) {
+ dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[_ep->num];
+ gnptxsts_data_t gtxstatus;
+ gtxstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+ if (_core_if->en_multiple_tx_fifo == 0 &&
+ gtxstatus.b.nptxqspcavail == 0) {
+#ifdef CONFIG_DWC_DEBUG
+ DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32);
+#endif /* */
+ return;
+ }
+ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
+ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
+
+ /* Zero Length Packet? */
+ if (_ep->xfer_len == 0) {
+ deptsiz.b.xfersize = 0;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ /* Program the transfer size and packet count
+ * as follows: xfersize = N * maxpacket +
+ * short_packet pktcnt = N + (short_packet
+ * exist ? 1 : 0)
+ */
+
+#ifdef CONFIG_405EZ
+ /*
+ * Added-sr: 2007-07-26
+ *
+ * Since the 405EZ (Ultra) only support 2047 bytes as
+ * max transfer size, we have to split up bigger transfers
+ * into multiple transfers of 1024 bytes sized messages.
+ * It often happens that transfers of 4096 bytes are
+ * required (zero-gadget, file_storage-gadget).
+ */
+ if (_ep->xfer_len > MAX_XFER_LEN) {
+ _ep->bytes_pending = _ep->xfer_len - MAX_XFER_LEN;
+ _ep->xfer_len = MAX_XFER_LEN;
+ }
+#endif
+
+ deptsiz.b.xfersize = _ep->xfer_len;
+ deptsiz.b.pktcnt = (_ep->xfer_len - 1 + _ep->maxpacket) / _ep->maxpacket;
+ }
+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+
+ /* Write the DMA register */
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&(in_regs->diepdma), (uint32_t) _ep->dma_addr);
+ } else {
+ if (_ep->type != DWC_OTG_EP_TYPE_ISOC) {
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt,
+ * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode,
+ * the data will be written into the fifo by the ISR.
+ */
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ dwc_modify_reg32(&_core_if->core_global_regs->gintsts,
+ intr_mask.d32, 0);
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (_ep->xfer_len > 0) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk = 1 << _ep->num;
+ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->
+ dtknqr4_fifoemptymsk,0,fifoemptymsk);
+ }
+ }
+ }
+ }
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+ if (_core_if->dma_enable) {
+ depctl.d32 = dwc_read_reg32(&_core_if->dev_if->in_ep_regs[0]->diepctl);
+ depctl.b.nextep = _ep->num;
+ dwc_write_reg32(&_core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32);
+ }
+ } else {
+ /* OUT endpoint */
+ dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num];
+ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
+ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
+
+ /* Program the transfer size and packet count as follows:
+ *
+ * pktcnt = N
+ * xfersize = N * maxpacket
+ */
+ if (_ep->xfer_len == 0) {
+ /* Zero Length Packet */
+ deptsiz.b.xfersize = _ep->maxpacket;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ deptsiz.b.pktcnt = (_ep->xfer_len + (_ep->maxpacket - 1)) / _ep->maxpacket;
+ deptsiz.b.xfersize = deptsiz.b.pktcnt * _ep->maxpacket;
+ }
+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+ DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n",
+ _ep->num, deptsiz.b.xfersize, deptsiz.b.pktcnt);
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&(out_regs->doepdma),
+ (uint32_t) _ep->dma_addr);
+ }
+ if (_ep->type == DWC_OTG_EP_TYPE_ISOC) {
+ /** @todo NGS: dpid is read-only. Use setd0pid
+ * or setd1pid. */
+ if (_ep->even_odd_frame) {
+ depctl.b.setd1pid = 1;
+ } else {
+ depctl.b.setd0pid = 1;
+ }
+ }
+
+ /* EP enable */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ dwc_write_reg32(&out_regs->doepctl, depctl.d32);
+ DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
+ dwc_read_reg32(&out_regs->doepctl),
+ dwc_read_reg32(&out_regs->doeptsiz));
+ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
+ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk),
+ dwc_read_reg32(&_core_if->core_global_regs->gintmsk));
+ }
+}
+
+/**
+ * This function does the setup for a data transfer for EP0 and starts
+ * the transfer. For an IN transfer, the packets will be loaded into
+ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are
+ * unloaded from the Rx FIFO in the ISR.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP0 data.
+ */
+void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+ volatile depctl_data_t depctl;
+ volatile deptsiz0_data_t deptsiz;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+ "xfer_buff=%p start_xfer_buff=%p total_len=%d\n",
+ _ep->num, (_ep->is_in ? "IN" : "OUT"), _ep->xfer_len,
+ _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff,
+ _ep->total_len);
+ _ep->total_len = _ep->xfer_len;
+
+ /* IN endpoint */
+ if (_ep->is_in == 1) {
+ dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[0];
+ gnptxsts_data_t gtxstatus;
+ gtxstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+ if (_core_if->en_multiple_tx_fifo == 0 &&
+ gtxstatus.b.nptxqspcavail == 0) {
+#ifdef CONFIG_DWC_DEBUG
+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+ DWC_DEBUGPL(DBG_PCD, "DIEPCTL0=%0x\n",
+ dwc_read_reg32(&in_regs->diepctl));
+ DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n",
+ deptsiz.d32, deptsiz.b.xfersize,deptsiz.b.pktcnt);
+ DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n", gtxstatus.d32);
+#endif /* */
+ printk("TX Queue or FIFO Full!!!!\n"); // test-only
+ return;
+ }
+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+
+ /* Zero Length Packet? */
+ if (_ep->xfer_len == 0) {
+ deptsiz.b.xfersize = 0;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ /* Program the transfer size and packet count
+ * as follows: xfersize = N * maxpacket +
+ * short_packet pktcnt = N + (short_packet
+ * exist ? 1 : 0)
+ */
+ if (_ep->xfer_len > _ep->maxpacket) {
+ _ep->xfer_len = _ep->maxpacket;
+ deptsiz.b.xfersize = _ep->maxpacket;
+ } else {
+ deptsiz.b.xfersize = _ep->xfer_len;
+ }
+ deptsiz.b.pktcnt = 1;
+ }
+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+ DWC_DEBUGPL(DBG_PCDV,"IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
+ _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt, deptsiz.d32);
+
+ /* Write the DMA register */
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&(in_regs->diepdma), (uint32_t) _ep->dma_addr);
+ }
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt, the
+ * data will be written into the fifo by the ISR.
+ */
+ if (!_core_if->dma_enable) {
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ dwc_modify_reg32(&_core_if->core_global_regs->gintsts, intr_mask.d32, 0);
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, intr_mask.d32,
+ intr_mask.d32);
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (_ep->xfer_len > 0) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk |= 1 << _ep->num;
+ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
+ 0, fifoemptymsk);
+ }
+ }
+ }
+ } else {
+ /* OUT endpoint */
+ dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num];
+ depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
+ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
+
+ /* Program the transfer size and packet count as follows:
+ * xfersize = N * (maxpacket + 4 - (maxpacket % 4))
+ * pktcnt = N */
+ if (_ep->xfer_len == 0) {
+ /* Zero Length Packet */
+ deptsiz.b.xfersize = _ep->maxpacket;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ deptsiz.b.pktcnt = (_ep->xfer_len + (_ep->maxpacket - 1)) / _ep->maxpacket;
+ deptsiz.b.xfersize = deptsiz.b.pktcnt * _ep->maxpacket;
+ }
+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+ DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n",
+ _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt);
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&(out_regs->doepdma), (uint32_t) _ep->dma_addr);
+ }
+
+ /* EP enable */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ dwc_write_reg32(&(out_regs->doepctl), depctl.d32);
+ }
+}
+
+
+/**
+ * This function continues control IN transfers started by
+ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a
+ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one
+ * bit for the packet count.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP0 data.
+ */
+void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t * _core_if,
+ dwc_ep_t * _ep)
+{
+ depctl_data_t depctl;
+ deptsiz0_data_t deptsiz;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ if (_ep->is_in == 1) {
+ dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[0];
+ gnptxsts_data_t tx_status = {.d32 = 0};
+ tx_status.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+
+ /** @todo Should there be check for room in the Tx
+ * Status Queue. If not remove the code above this comment. */
+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+
+ /* Program the transfer size and packet count
+ * as follows: xfersize = N * maxpacket +
+ * short_packet pktcnt = N + (short_packet
+ * exist ? 1 : 0)
+ */
+ deptsiz.b.xfersize = (_ep->total_len - _ep->xfer_count) >
+ _ep->maxpacket ? _ep->maxpacket : (_ep->total_len -
+ _ep->xfer_count);
+ deptsiz.b.pktcnt = 1;
+ _ep->xfer_len += deptsiz.b.xfersize;
+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+ DWC_DEBUGPL(DBG_PCDV,"IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
+ _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt, deptsiz.d32);
+
+ /* Write the DMA register */
+ if (_core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
+ dwc_write_reg32(&(in_regs->diepdma),(uint32_t) _ep->dma_addr);
+ }
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt, the
+ * data will be written into the fifo by the ISR.
+ */
+ if (!_core_if->dma_enable) {
+ /* First clear it from GINTSTS */
+ intr_mask.b.nptxfempty = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->gintsts,
+ intr_mask.d32);
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+ }
+ }
+}
+
+#ifdef CONFIG_DWC_DEBUG
+void dump_msg(const u8 * buf, unsigned int length)
+{
+ unsigned int start, num, i;
+ char line[52], *p;
+ if (length >= 512)
+ return;
+ start = 0;
+ while (length > 0) {
+ num = min(length, 16u);
+ p = line;
+ for (i = 0; i < num; ++i) {
+ if (i == 8)
+ *p++ = ' ';
+ sprintf(p, " %02x", buf[i]);
+ p += 3;
+ }
+ *p = 0;
+ DWC_PRINT("%6x: %s\n", start, line);
+ buf += num;
+ start += num;
+ length -= num;
+ }
+}
+
+
+#else /* */
+static inline void dump_msg(const u8 * buf, unsigned int length)
+{
+}
+#endif /* */
+
+/**
+ * This function writes a packet into the Tx FIFO associated with the
+ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For
+ * periodic EPs the periodic Tx FIFO associated with the EP is written
+ * with all packets for the next micro-frame.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to write packet for.
+ * @param _dma Indicates if DMA is being used.
+ */
+void dwc_otg_ep_write_packet(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep,
+ int _dma)
+{
+ /**
+ * The buffer is padded to DWORD on a per packet basis in
+ * slave/dma mode if the MPS is not DWORD aligned. The last
+ * packet, if short, is also padded to a multiple of DWORD.
+ *
+ * ep->xfer_buff always starts DWORD aligned in memory and is a
+ * multiple of DWORD in length
+ *
+ * ep->xfer_len can be any number of bytes
+ *
+ * ep->xfer_count is a multiple of ep->maxpacket until the last
+ * packet
+ *
+ * FIFO access is DWORD */
+#ifndef CONFIG_OTG_PLB_DMA
+ uint32_t i;
+#endif
+ uint32_t byte_count;
+ uint32_t dword_count;
+ uint32_t * fifo;
+ uint32_t * data_buff = (uint32_t *) _ep->xfer_buff;
+#if !defined( CONFIG_OTG_PLB_DMA_TASKLET) && defined(CONFIG_OTG_PLB_DMA)
+ uint32_t dma_sts = 0;
+#endif
+ //DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, _core_if, _ep);
+ if (_ep->xfer_count >= _ep->xfer_len) {
+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s() No data for EP%d!!!\n", __func__, _ep->num);
+ return;
+ }
+
+ /* Find the byte length of the packet either short packet or MPS */
+ if ((_ep->xfer_len - _ep->xfer_count) < _ep->maxpacket) {
+ byte_count = _ep->xfer_len - _ep->xfer_count;
+ } else {
+ byte_count = _ep->maxpacket;
+ }
+
+ /* Find the DWORD length, padded by extra bytes as necessary if MPS
+ * is not a multiple of DWORD */
+ dword_count = (byte_count + 3) / 4;
+
+#ifdef VERBOSE
+ dump_msg(_ep->xfer_buff, byte_count);
+#endif /* */
+
+ /**@todo NGS Where are the Periodic Tx FIFO addresses
+ * initialized? What should this be? */
+ fifo = _core_if->data_fifo[_ep->num];
+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n",
+ fifo, data_buff, *data_buff, byte_count);
+ if (!_dma) {
+#ifdef CONFIG_OTG_PLB_DMA
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+ if (byte_count < USB_BUFSIZ) {
+ int i;
+ for (i = 0; i < dword_count; i++, data_buff++) {
+ dwc_write_datafifo32(fifo, *data_buff);
+ }
+ }
+ else {
+ DWC_DEBUGPL(DBG_SP, "%s set release_later %d\n", __func__, dword_count);
+ atomic_set(& release_later, 1);
+ //disable_irq_nosync(94);
+ dwc_otg_disable_global_interrupts(_core_if);
+
+ _core_if->dma_xfer.dma_data_buff = data_buff;
+ _core_if->dma_xfer.dma_data_fifo = fifo;
+ _core_if->dma_xfer.dma_count = dword_count;
+ _core_if->dma_xfer.dma_dir = OTG_TX_DMA;
+ tasklet_schedule(_core_if->plbdma_tasklet);
+ }
+#else /* !CONFIG_OTG_PLB_DMA_TASKLET */
+ ppc4xx_start_plb_dma(_core_if, data_buff, fifo, (dword_count * 4),
+ PLB_DMA_INT_DIS , PLB_DMA_CH, OTG_TX_DMA);
+ while (mfdcr(DCRN_DMACR0 + (DMA_CH0*8)) & DMA_CE_ENABLE) {
+ }
+ dma_sts = (uint32_t)ppc4xx_get_dma_status();
+#ifdef OTG_PLB_DMA_DBG
+ if (!(dma_sts & DMA_CS0)) {
+ printk("DMA Status (Terminal Count not occured) 0x%08x\n", mfdcr(DCRN_DMASR));
+ }
+#endif
+ if (dma_sts & DMA_CH0_ERR) {
+ printk("DMA Status (Channel 0 Error) 0x%08x\n", mfdcr(DCRN_DMASR));
+ }
+ ppc4xx_clr_dma_status(PLB_DMA_CH);
+#ifdef OTG_PLB_DMA_DBG
+ printk("%32s DMA Status =0x%08x\n", __FUNCTION__, mfdcr(DCRN_DMASR)); /* vj_dbg */
+#endif
+#endif /* CONFIG_OTG_PLB_DMA_TASKLET */
+
+#else /* DWC_SLAVE mode */
+ for (i = 0; i < dword_count; i++, data_buff++) {
+ dwc_write_datafifo32(fifo, *data_buff);
+ }
+#endif
+ }
+
+ _ep->xfer_count += byte_count;
+ _ep->xfer_buff += byte_count;
+ _ep->dma_addr += byte_count;
+}
+
+
+/**
+ * Set the STALL bit on an endpoint.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to set the stall on.
+ */
+void dwc_otg_ep_set_stall(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+ depctl_data_t depctl;
+ volatile uint32_t *depctl_addr;
+ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num,
+ (_ep->is_in ? "IN" : "OUT"));
+ if (_ep->is_in == 1) {
+ depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl);
+ depctl.d32 = dwc_read_reg32(depctl_addr);
+
+ /* IN EP: disable it first if currently enabled, then stall */
+ if (depctl.b.epena) {
+ depctl.b.epdis = 1;
+ }
+ depctl.b.stall = 1;
+ dwc_write_reg32(depctl_addr, depctl.d32);
+ } else {
+ depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl);
+ depctl.d32 = dwc_read_reg32(depctl_addr);
+
+ /* OUT EP: only the stall bit is set (no epdis here) */
+ depctl.b.stall = 1;
+ dwc_write_reg32(depctl_addr, depctl.d32);
+ }
+ DWC_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", dwc_read_reg32(depctl_addr));
+ return;
+}
+
+
+/**
+ * Clear the EP STALL.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to clear stall from.
+ */
+void dwc_otg_ep_clear_stall(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+ depctl_data_t depctl;
+ volatile uint32_t *depctl_addr;
+ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num,
+ (_ep->is_in ? "IN" : "OUT"));
+ if (_ep->is_in == 1) {
+ depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl);
+ } else {
+ depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl);
+ }
+ depctl.d32 = dwc_read_reg32(depctl_addr);
+
+ /* clear the stall bit */
+ depctl.b.stall = 0;
+
+ /*
+ * USB Spec 9.4.5: For endpoints using data toggle, regardless
+ * of whether an endpoint has the Halt feature set, a
+ * ClearFeature(ENDPOINT_HALT) request always results in the
+ * data toggle being reinitialized to DATA0.
+ */
+ if (_ep->type == DWC_OTG_EP_TYPE_INTR ||
+ _ep->type == DWC_OTG_EP_TYPE_BULK) {
+ depctl.b.setd0pid = 1; /* DATA0 */
+ }
+ dwc_write_reg32(depctl_addr, depctl.d32);
+ DWC_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", dwc_read_reg32(depctl_addr));
+ return;
+}
+
+
+/**
+ * This function reads a packet from the Rx FIFO into the destination
+ * buffer. To read SETUP data use dwc_otg_read_setup_packet.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _dest Destination buffer for the packet.
+ * @param _bytes Number of bytes to copy to the destination.
+ */
+void dwc_otg_read_packet(dwc_otg_core_if_t * _core_if,
+ uint8_t * _dest, uint16_t _bytes)
+{
+#ifndef CONFIG_OTG_PLB_DMA
+ int i;
+#endif
+ int word_count = (_bytes + 3) / 4;
+ volatile uint32_t *fifo = _core_if->data_fifo[0];
+ uint32_t * data_buff = (uint32_t *) _dest;
+#if !defined( CONFIG_OTG_PLB_DMA_TASKLET) && defined(CONFIG_OTG_PLB_DMA)
+ uint32_t dma_sts = 0;
+#endif
+
+ /**
+ * @todo Account for the case where _dest is not dword aligned. This
+ * requires reading data from the FIFO into a uint32_t temp buffer,
+ * then moving it into the data buffer.
+ */
+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV | DBG_SP), "%s(%p,%p,%d)\n", __func__,
+ _core_if, _dest, _bytes);
+#ifdef CONFIG_OTG_PLB_DMA
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+ if ( _bytes < USB_BUFSIZ) {
+ int i;
+ for (i = 0; i < word_count; i++, data_buff++) {
+ *data_buff = dwc_read_datafifo32(fifo);
+ }
+ } else {
+ DWC_DEBUGPL(DBG_SP, "%s set release_later %d\n", __func__, _bytes);
+ atomic_set(& release_later, 1);
+ //disable_irq_nosync(94);
+ dwc_otg_disable_global_interrupts(_core_if);
+
+ /* plbdma tasklet */
+ _core_if->dma_xfer.dma_data_buff = data_buff;
+ _core_if->dma_xfer.dma_data_fifo = (void *)fifo;
+ _core_if->dma_xfer.dma_count = word_count;
+ _core_if->dma_xfer.dma_dir = OTG_RX_DMA;
+ tasklet_schedule(_core_if->plbdma_tasklet);
+ }
+#else /* !CONFIG_OTG_PLB_DMA_TASKLET */
+ ppc4xx_start_plb_dma(_core_if,(void *)fifo,data_buff, (word_count * 4),
+ PLB_DMA_INT_DIS, PLB_DMA_CH, OTG_RX_DMA);
+ while (mfdcr(DCRN_DMACR0 + (DMA_CH0*8)) & DMA_CE_ENABLE) {
+ }
+ dma_sts = (uint32_t)ppc4xx_get_dma_status();
+#ifdef OTG_PLB_DMA_DBG
+ if (!(dma_sts & DMA_CS0)) {
+ printk("DMA Status (Terminal Count not occured) 0x%08x\n", mfdcr(DCRN_DMASR));
+ }
+#endif
+ if (dma_sts & DMA_CH0_ERR) {
+ printk("DMA Status (Channel 0 Error) 0x%08x\n", mfdcr(DCRN_DMASR));
+ }
+ ppc4xx_clr_dma_status(PLB_DMA_CH);
+#ifdef OTG_PLB_DMA_DBG
+ printk("%32s DMA Status =0x%08x\n", __FUNCTION__, mfdcr(DCRN_DMASR));
+ printk(" Rxed buffer \n");
+ for( i=0; i< _bytes; i++) {
+ printk(" 0x%02x",*(_dest +i));
+ }
+ printk(" \n End of Rxed buffer \n");
+#endif
+#endif /* CONFIG_OTG_PLB_DMA_TASKLET */
+
+#else /* DWC_SLAVE mode */
+ for (i = 0; i < word_count; i++, data_buff++) {
+ *data_buff = dwc_read_datafifo32(fifo);
+ }
+#endif
+ return;
+}
+
+
+/**
+ * This function reads the device registers and prints them.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_dev_registers(dwc_otg_core_if_t * _core_if)
+{
+ int i;
+ volatile uint32_t *addr;
+ DWC_PRINT("Device Global Registers\n");
+ addr = &_core_if->dev_if->dev_global_regs->dcfg;
+ DWC_PRINT("DCFG @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->dev_global_regs->dctl;
+ DWC_PRINT("DCTL @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->dev_global_regs->dsts;
+ DWC_PRINT("DSTS @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->dev_global_regs->diepmsk;
+ DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->dev_global_regs->doepmsk;
+ DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->dev_global_regs->daint;
+ DWC_PRINT("DAINT @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->dev_global_regs->dtknqr1;
+ DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ if (_core_if->hwcfg2.b.dev_token_q_depth > 6) {
+ addr = &_core_if->dev_if->dev_global_regs->dtknqr2;
+ DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n",
+ (uint32_t) addr, dwc_read_reg32(addr));
+ }
+ addr = &_core_if->dev_if->dev_global_regs->dvbusdis;
+ DWC_PRINT("DVBUSID @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->dev_global_regs->dvbuspulse;
+ DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n",
+ (uint32_t) addr, dwc_read_reg32(addr));
+ if (_core_if->hwcfg2.b.dev_token_q_depth > 14) {
+ addr = &_core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
+ DWC_PRINT("DTKNQR3 @0x%08X : 0x%08X\n",
+ (uint32_t) addr, dwc_read_reg32(addr));
+ }
+ if (_core_if->hwcfg2.b.dev_token_q_depth > 22) {
+ addr = &_core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
+ DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ }
+ for (i = 0; i <= _core_if->dev_if->num_in_eps; i++) {
+ DWC_PRINT("Device IN EP %d Registers\n", i);
+ addr = &_core_if->dev_if->in_ep_regs[i]->diepctl;
+ DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->in_ep_regs[i]->diepint;
+ DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->in_ep_regs[i]->dieptsiz;
+ DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->in_ep_regs[i]->diepdma;
+ DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->in_ep_regs[i]->dtxfsts;
+ DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ }
+ for (i = 0; i <= _core_if->dev_if->num_out_eps; i++) {
+ DWC_PRINT("Device OUT EP %d Registers\n", i);
+ addr = &_core_if->dev_if->out_ep_regs[i]->doepctl;
+ DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->out_ep_regs[i]->doepfn;
+ DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->out_ep_regs[i]->doepint;
+ DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->out_ep_regs[i]->doeptsiz;
+ DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->dev_if->out_ep_regs[i]->doepdma;
+ DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ }
+ return;
+}
+
+
+/**
+ * This function reads the host registers and prints them
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_host_registers(dwc_otg_core_if_t * _core_if)
+{
+ int i;
+ volatile uint32_t *addr;
+ DWC_PRINT("Host Global Registers\n");
+ addr = &_core_if->host_if->host_global_regs->hcfg;
+ DWC_PRINT("HCFG @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->host_global_regs->hfir;
+ DWC_PRINT("HFIR @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->host_global_regs->hfnum;
+ DWC_PRINT("HFNUM @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->host_global_regs->hptxsts;
+ DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->host_global_regs->haint;
+ DWC_PRINT("HAINT @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->host_global_regs->haintmsk;
+ DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = _core_if->host_if->hprt0;
+ DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ for (i = 0; i < _core_if->core_params->host_channels; i++) {
+ DWC_PRINT("Host Channel %d Specific Registers\n", i);
+ addr = &_core_if->host_if->hc_regs[i]->hcchar;
+ DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->hc_regs[i]->hcsplt;
+ DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->hc_regs[i]->hcint;
+ DWC_PRINT("HCINT @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->hc_regs[i]->hcintmsk;
+ DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->hc_regs[i]->hctsiz;
+ DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->host_if->hc_regs[i]->hcdma;
+ DWC_PRINT("HCDMA @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ }
+ return;
+}
+
+
+/**
+ * This function reads the core global registers and prints them.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_global_registers(dwc_otg_core_if_t * _core_if)
+{
+ int i;
+ volatile uint32_t *addr;
+ DWC_PRINT("Core Global Registers\n");
+ addr = &_core_if->core_global_regs->gotgctl;
+ DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gotgint;
+ DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gahbcfg;
+ DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gusbcfg;
+ DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->grstctl;
+ DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gintsts;
+ DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gintmsk;
+ DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->grxstsr;
+ DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+
+ /* NOTE(review): GRXSTSP is deliberately not dumped -- reading it
+ * presumably pops the Rx FIFO (destructive); confirm before adding. */
+ addr = &_core_if->core_global_regs->grxfsiz;
+ DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gnptxfsiz;
+ DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gnptxsts;
+ DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gi2cctl;
+ DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gpvndctl;
+ DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->ggpio;
+ DWC_PRINT("GGPIO @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->guid;
+ DWC_PRINT("GUID @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->gsnpsid;
+ DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->ghwcfg1;
+ DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->ghwcfg2;
+ DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->ghwcfg3;
+ DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->ghwcfg4;
+ DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ addr = &_core_if->core_global_regs->hptxfsiz;
+ DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ for (i = 0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+ addr = &_core_if->core_global_regs->dptxfsiz_dieptxf[i];
+ DWC_PRINT("DPTXFSIZ[%d] @0x%08X : 0x%08X\n", i,
+ (uint32_t) addr, dwc_read_reg32(addr));
+ }
+}
+
+
+/**
+ * Flush a Tx FIFO and wait for the core to finish.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _num Tx FIFO to flush.
+ */
+extern void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t * _core_if,
+ const int _num)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset = {.d32 = 0 };
+ int count = 0;
+ DWC_DEBUGPL((DBG_CIL | DBG_PCDV), "Flush Tx FIFO %d\n", _num);
+ greset.b.txfflsh = 1;
+ greset.b.txfnum = _num;
+ dwc_write_reg32(&global_regs->grstctl, greset.d32);
+
+ /* Poll until the core clears txfflsh, or give up after ~10 ms */
+ do {
+ greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+ if (++count > 10000) {
+ DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
+ __func__, greset.d32, dwc_read_reg32(&global_regs->gnptxsts));
+ break;
+ }
+ udelay(1);
+ } while (greset.b.txfflsh == 1);
+ /* Wait for 3 PHY Clocks */
+ UDELAY(1);
+}
+
+
+/**
+ * Flush the Rx FIFO.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+extern void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t * _core_if)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset = {.d32 = 0 };
+ int count = 0;
+ DWC_DEBUGPL((DBG_CIL | DBG_PCDV), "%s\n", __func__);
+
+ /*
+ * Request an Rx FIFO flush; completion is polled below.
+ */
+ greset.b.rxfflsh = 1;
+ dwc_write_reg32(&global_regs->grstctl, greset.d32);
+
+ do {
+ greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+ if (++count > 10000) {
+ DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, greset.d32);
+ break;
+ }
+ udelay(1);
+ } while (greset.b.rxfflsh == 1);
+
+ /* Wait for 3 PHY Clocks */
+ UDELAY(1);
+}
+
+
+/**
+ * Do a soft reset of the core. Be careful with this because it
+ * resets all the internal state machines of the core.
+ */
+void dwc_otg_core_reset(dwc_otg_core_if_t * _core_if)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset = {.d32 = 0 };
+ int count = 0;
+ DWC_DEBUGPL(DBG_CILV, "%s\n", __func__);
+
+ /* Wait for AHB master IDLE state. */
+ do {
+ UDELAY(10);
+ greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+ if (++count > 100000) {
+ DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__, greset.d32);
+ return;
+ }
+ } while (greset.b.ahbidle == 0);
+
+ /* Core Soft Reset */
+ count = 0;
+ greset.b.csftrst = 1;
+ dwc_write_reg32(&global_regs->grstctl, greset.d32);
+
+ do {
+ greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+ if (++count > 10000) {
+ DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, greset.d32);
+ break;
+ }
+ udelay(1);
+ } while (greset.b.csftrst == 1);
+
+ /* Wait for 3 PHY Clocks */
+ /* NOTE(review): the 100 ms settle delay below looks empirical -- confirm. */
+ MDELAY(100);
+}
+
+
+/**
+ * Register HCD callbacks. The callbacks are used to start and stop
+ * the HCD for interrupt processing.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _cb the HCD callback structure.
+ * @param _p context pointer stored in _cb->p for the callbacks (usb_hcd*).
+ */
+extern void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t * _core_if,
+ dwc_otg_cil_callbacks_t * _cb, void *_p)
+{
+ _core_if->hcd_cb = _cb;
+ _cb->p = _p;
+}
+
+/**
+ * Register PCD callbacks. The callbacks are used to start and stop
+ * the PCD for interrupt processing.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _cb the PCD callback structure.
+ * @param _p context pointer stored in _cb->p for the callbacks (pcd*).
+ */
+extern void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t * _core_if,
+ dwc_otg_cil_callbacks_t * _cb, void *_p)
+{
+ _core_if->pcd_cb = _cb;
+ _cb->p = _p;
+}
+
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c
index a813327bf40..6b83ee46440 100644
--- a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c
@@ -31,7 +31,7 @@
* DAMAGE.
* ========================================================================== */
-#ifndef DWC_DEVICE_ONLY
+#ifndef CONFIG_DWC_DEVICE_ONLY
/**
* @file
@@ -78,21 +78,21 @@ static const struct hc_driver dwc_otg_hc_driver =
.hcd_priv_size = sizeof(dwc_otg_hcd_t),
.irq = dwc_otg_hcd_irq,
.flags = HCD_MEMORY | HCD_USB2,
- //.reset =
- .start = dwc_otg_hcd_start,
+ //.reset =
+ .start = dwc_otg_hcd_start,
#ifdef CONFIG_PM
.bus_suspend = dwc_otg_hcd_suspend,
- .bus_resume = dwc_otg_hcd_resume,
+ .bus_resume = dwc_otg_hcd_resume,
#endif
- .stop = dwc_otg_hcd_stop,
+ .stop = dwc_otg_hcd_stop,
.urb_enqueue = dwc_otg_hcd_urb_enqueue,
.urb_dequeue = dwc_otg_hcd_urb_dequeue,
.endpoint_disable = dwc_otg_hcd_endpoint_disable,
.get_frame_number = dwc_otg_hcd_get_frame_number,
.hub_status_data = dwc_otg_hcd_hub_status_data,
.hub_control = dwc_otg_hcd_hub_control,
- //.hub_suspend =
- //.hub_resume =
+ //.hub_suspend =
+ //.hub_resume =
};
@@ -159,7 +159,7 @@ static int32_t dwc_otg_hcd_stop_cb(void *_p)
static void del_xfer_timers(dwc_otg_hcd_t * _hcd)
{
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
int i;
int num_channels = _hcd->core_if->core_params->host_channels;
for (i = 0; i < num_channels; i++) {
@@ -394,7 +394,7 @@ static struct tasklet_struct reset_tasklet =
};
-#ifdef OTG_PLB_DMA_TASKLET
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
/**
* plbdma tasklet function
*/
@@ -448,6 +448,8 @@ static struct tasklet_struct plbdma_tasklet =
* USB bus with the core and calls the hc_driver->start() function. It returns
* a negative error on failure.
*/
+int init_hcd_usecs(dwc_otg_hcd_t *_hcd);
+
int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_device)
{
struct usb_hcd *hcd = NULL;
@@ -475,6 +477,7 @@ int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_de
dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
dwc_otg_hcd->core_if = otg_dev->core_if;
otg_dev->hcd = dwc_otg_hcd;
+ spin_lock_init(&dwc_otg_hcd->lock);
/* Register the HCD CIL Callbacks */
dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if,
&hcd_cil_callbacks, hcd);
@@ -507,7 +510,7 @@ int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_de
channel->hc_num = i;
dwc_otg_hcd->hc_ptr_array[i] = channel;
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]);
#endif /* */
DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i,channel);
@@ -520,7 +523,7 @@ int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_de
reset_tasklet.data = (unsigned long)dwc_otg_hcd;
dwc_otg_hcd->reset_tasklet = &reset_tasklet;
-#ifdef OTG_PLB_DMA_TASKLET
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
/* Initialize plbdma tasklet. */
plbdma_tasklet.data = (unsigned long)dwc_otg_hcd->core_if;
dwc_otg_hcd->core_if->plbdma_tasklet = &plbdma_tasklet;
@@ -536,6 +539,9 @@ int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_de
_dev->dma_mask = (void *)0;
_dev->coherent_dma_mask = 0;
}
+
+ init_hcd_usecs(dwc_otg_hcd);
+
/*
* Finish generic HCD initialization and start the HCD. This function
* allocates the DMA buffer pool, registers the USB bus, requests the
@@ -564,8 +570,8 @@ int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_de
goto error3;
}
DWC_DEBUGPL(DBG_HCD,
- "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n",
- _dev->bus_id, hcd->self.busnum);
+ "DWC OTG HCD Initialized HCD, usbbus=%d\n",
+ hcd->self.busnum);
return 0;
/* Error conditions */
@@ -612,8 +618,7 @@ static void hcd_reinit(dwc_otg_hcd_t * _hcd)
dwc_hc_t * channel;
_hcd->flags.d32 = 0;
_hcd->non_periodic_qh_ptr = &_hcd->non_periodic_sched_active;
- _hcd->non_periodic_channels = 0;
- _hcd->periodic_channels = 0;
+ _hcd->available_host_channels = _hcd->core_if->core_params->host_channels;
/*
* Put all channels in the free channel list and clean up channel
@@ -690,8 +695,8 @@ static void qh_list_free(dwc_otg_hcd_t * _hcd, struct list_head *_qh_list)
return;
}
- /* Ensure there are no QTDs or URBs left. */
- kill_urbs_in_qh_list(_hcd, _qh_list);
+ /* Ensure there are no QTDs or URBs left. */
+ kill_urbs_in_qh_list(_hcd, _qh_list);
for (item = _qh_list->next; item != _qh_list; item = _qh_list->next) {
qh = list_entry(item, dwc_otg_qh_t, qh_list_entry);
dwc_otg_hcd_qh_remove_and_free(_hcd, qh);
@@ -785,7 +790,7 @@ void dwc_otg_hcd_free(struct usb_hcd *_hcd)
}
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
static void dump_urb_info(struct urb *_urb, char *_fn_name)
{
DWC_PRINT("%s, urb %p\n", _fn_name, _urb);
@@ -911,18 +916,20 @@ int dwc_otg_hcd_urb_enqueue(struct usb_hcd *_hcd,
local_irq_restore(flags);
return retval;
}
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
dump_urb_info(_urb, "dwc_otg_hcd_urb_enqueue");
}
#endif /* */
if (!dwc_otg_hcd->flags.b.port_connect_status) {
/* No longer connected. */
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb);
local_irq_restore(flags);
return -ENODEV;
}
qtd = dwc_otg_hcd_qtd_create(_urb);
if (qtd == NULL) {
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb);
local_irq_restore(flags);
DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
return -ENOMEM;
@@ -931,6 +938,7 @@ int dwc_otg_hcd_urb_enqueue(struct usb_hcd *_hcd,
if (retval < 0) {
DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
"Error status %d\n", retval);
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb);
dwc_otg_hcd_qtd_free(qtd);
}
local_irq_restore(flags);
@@ -946,9 +954,14 @@ int dwc_otg_hcd_urb_dequeue(struct usb_hcd *_hcd, struct urb *_urb, int _status)
dwc_otg_hcd_t * dwc_otg_hcd;
dwc_otg_qtd_t * urb_qtd;
dwc_otg_qh_t * qh;
+ struct usb_host_endpoint *_ep = dwc_urb_to_endpoint(_urb);
int retval;
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
+
+ if (!_ep)
+ return -EINVAL;
+
local_irq_save(flags);
retval = usb_hcd_check_unlink_urb(_hcd, _urb, _status);
if (retval) {
@@ -967,7 +980,7 @@ int dwc_otg_hcd_urb_dequeue(struct usb_hcd *_hcd, struct urb *_urb, int _status)
if (qh == NULL) {
goto done;
}
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
dump_urb_info(_urb, "dwc_otg_hcd_urb_dequeue");
if (urb_qtd == qh->qtd_in_process) {
@@ -1005,27 +1018,18 @@ int dwc_otg_hcd_urb_dequeue(struct usb_hcd *_hcd, struct urb *_urb, int _status)
dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
}
done:
- local_irq_restore(flags);
_urb->hcpriv = NULL;
/* Higher layer software sets URB status. */
-#if 1 /* Fixed bug relate kernel hung when unplug cable */
usb_hcd_unlink_urb_from_ep(_hcd, _urb);
usb_hcd_giveback_urb(_hcd, _urb, _status);
+
+ local_irq_restore(flags);
+
if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
DWC_PRINT("Called usb_hcd_giveback_urb()\n");
DWC_PRINT(" urb->status = %d\n", _status);
}
-#else
- if (_status != -ECONNRESET) {
- usb_hcd_unlink_urb_from_ep(_hcd, _urb);
- usb_hcd_giveback_urb(_hcd, _urb, _status);
- if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
- DWC_PRINT("Called usb_hcd_giveback_urb()\n");
- DWC_PRINT(" urb->status = %d\n", _status);
- }
- }
-#endif
return 0;
}
@@ -1044,7 +1048,7 @@ void dwc_otg_hcd_endpoint_disable(struct usb_hcd *_hcd,
qh = (dwc_otg_qh_t *) (_ep->hcpriv);
if (qh != NULL) {
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
/** Check that the QTD list is really empty */
if (!list_empty(&qh->qtd_list)) {
DWC_WARN("DWC OTG HCD EP DISABLE:"
@@ -1058,7 +1062,6 @@ void dwc_otg_hcd_endpoint_disable(struct usb_hcd *_hcd,
return;
}
-extern int fscz_debug;
/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
* there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
* interrupt.
@@ -1084,7 +1087,7 @@ int dwc_otg_hcd_hub_status_data(struct usb_hcd *_hcd, char *_buf)
|| dwc_otg_hcd->flags.b.port_suspend_change
|| dwc_otg_hcd->flags.b.port_over_current_change) << 1;
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
if (_buf[0]) {
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:"
" Root port status changed\n");
@@ -1821,7 +1824,7 @@ int dwc_otg_hcd_hub_control(struct usb_hcd *_hcd, u16 _typeReq, u16 _wValue,
if (hprt0.b.prtpwr)
port_status |= (1 << USB_PORT_FEAT_POWER);
if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
- port_status |= (1 << USB_PORT_FEAT_HIGHSPEED);
+ port_status |= USB_PORT_STAT_HIGH_SPEED;
else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
@@ -1903,7 +1906,7 @@ int dwc_otg_hcd_hub_control(struct usb_hcd *_hcd, u16 _typeReq, u16 _wValue,
}
/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
- MDELAY(60);
+ MDELAY(60);
hprt0.b.prtrst = 0;
dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
break;
@@ -2216,6 +2219,7 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd)
struct list_head *qh_ptr;
dwc_otg_qh_t * qh;
int num_channels;
+ unsigned long flags;
dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
#ifdef DEBUG_SOF
@@ -2223,9 +2227,20 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd)
#endif /* */
/* Process entries in the periodic ready list. */
+ num_channels = _hcd->core_if->core_params->host_channels;
qh_ptr = _hcd->periodic_sched_ready.next;
while (qh_ptr != &_hcd->periodic_sched_ready
&& !list_empty(&_hcd->free_hc_list)) {
+
+ // Make sure we leave one channel for non periodic transactions.
+ local_irq_save(flags);
+ if (_hcd->available_host_channels <= 1) {
+ local_irq_restore(flags);
+ break;
+ }
+ _hcd->available_host_channels--;
+ local_irq_restore(flags);
+
qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
assign_and_init_hc(_hcd, qh);
/*
@@ -2233,7 +2248,9 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd)
* periodic assigned schedule.
*/
qh_ptr = qh_ptr->next;
+ local_irq_save(flags);
list_move(&qh->qh_list_entry, &_hcd->periodic_sched_assigned);
+ local_irq_restore(flags);
ret_val = DWC_OTG_TRANSACTION_PERIODIC;
}
/*
@@ -2245,7 +2262,6 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd)
while (qh_ptr != &_hcd->non_periodic_sched_deferred) {
uint16_t frame_number =
dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd));
- unsigned long flags;
qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
qh_ptr = qh_ptr->next;
@@ -2269,10 +2285,17 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd)
*/
qh_ptr = _hcd->non_periodic_sched_inactive.next;
num_channels = _hcd->core_if->core_params->host_channels;
- while (qh_ptr != &_hcd->non_periodic_sched_inactive &&
- (_hcd->non_periodic_channels <
- num_channels - _hcd->periodic_channels)
+ while (qh_ptr != &_hcd->non_periodic_sched_inactive
&& !list_empty(&_hcd->free_hc_list)) {
+
+ local_irq_save(flags);
+ if (_hcd->available_host_channels < 1) {
+ local_irq_restore(flags);
+ break;
+ }
+ _hcd->available_host_channels--;
+ local_irq_restore(flags);
+
qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
assign_and_init_hc(_hcd, qh);
@@ -2281,14 +2304,15 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd)
* non-periodic active schedule.
*/
qh_ptr = qh_ptr->next;
+ local_irq_save(flags);
list_move(&qh->qh_list_entry,
&_hcd->non_periodic_sched_active);
+ local_irq_restore(flags);
if (ret_val == DWC_OTG_TRANSACTION_NONE) {
ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
} else {
ret_val = DWC_OTG_TRANSACTION_ALL;
}
- _hcd->non_periodic_channels++;
}
return ret_val;
}
@@ -2375,7 +2399,7 @@ static void process_non_periodic_channels(dwc_otg_hcd_t * _hcd)
_hcd->core_if->core_global_regs;
DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n",
tx_status.b.nptxqspcavail);
@@ -2416,7 +2440,7 @@ static void process_non_periodic_channels(dwc_otg_hcd_t * _hcd)
no_fifo_space = 1;
break;
}
-#ifdef OTG_PLB_DMA_TASKLET
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
if (atomic_read(&release_later)) {
break;
}
@@ -2432,8 +2456,8 @@ static void process_non_periodic_channels(dwc_otg_hcd_t * _hcd)
gintmsk_data_t intr_mask = {.d32 = 0};
intr_mask.b.nptxfempty = 1;
-#ifndef OTG_PLB_DMA_TASKLET
-#ifdef DEBUG
+#ifndef CONFIG_OTG_PLB_DMA_TASKLET
+#ifdef CONFIG_DWC_DEBUG
tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n",
tx_status.b.nptxqspcavail);
@@ -2484,7 +2508,7 @@ static void process_periodic_channels(dwc_otg_hcd_t * _hcd)
host_regs = _hcd->core_if->host_if->host_global_regs;
DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n");
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n",
tx_status.b.ptxqspcavail);
@@ -2544,7 +2568,7 @@ static void process_periodic_channels(dwc_otg_hcd_t * _hcd)
global_regs = _hcd->core_if->core_global_regs;
intr_mask.b.ptxfempty = 1;
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
DWC_DEBUGPL(DBG_HCDV," P Tx Req Queue Space Avail (after queue): %d\n",
tx_status.b.ptxqspcavail);
@@ -2629,7 +2653,7 @@ __releases(_hcd->lock)
__acquires(_hcd->lock)
{
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
__func__, _urb, usb_pipedevice(_urb->pipe),
@@ -2645,11 +2669,11 @@ __acquires(_hcd->lock)
}
#endif /* */
+ spin_lock(&_hcd->lock);
_urb->hcpriv = NULL;
usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(_hcd), _urb);
- spin_unlock(&_hcd->lock);
usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(_hcd), _urb, _status);
- spin_lock(&_hcd->lock);
+ spin_unlock(&_hcd->lock);
}
@@ -2663,7 +2687,7 @@ dwc_otg_qh_t * dwc_urb_to_qh(struct urb *_urb)
}
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
void dwc_print_setup_data(uint8_t * setup)
{
int i;
@@ -2721,7 +2745,7 @@ void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t * _hcd)
{
/*
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
DWC_PRINT("Frame remaining at SOF:\n");
DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
_hcd->frrem_samples, _hcd->frrem_accum,
@@ -2783,7 +2807,7 @@ void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t * _hcd)
} void dwc_otg_hcd_dump_state(dwc_otg_hcd_t * _hcd)
{
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
int num_channels;
int i;
gnptxsts_data_t np_tx_status;
@@ -2873,9 +2897,7 @@ void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t * _hcd)
urb->actual_length);
}
}
- } DWC_PRINT(" non_periodic_channels: %d\n",
- _hcd->non_periodic_channels);
- DWC_PRINT(" periodic_channels: %d\n", _hcd->periodic_channels);
+ }
DWC_PRINT(" periodic_usecs: %d\n", _hcd->periodic_usecs);
np_tx_status.d32 =
dwc_read_reg32(&_hcd->core_if->core_global_regs->gnptxsts);
@@ -2897,4 +2919,4 @@ void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t * _hcd)
#endif /* */
}
-#endif /* DWC_DEVICE_ONLY */
+#endif /* CONFIG_DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c.org b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c.org
new file mode 100644
index 00000000000..a813327bf40
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c.org
@@ -0,0 +1,2900 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd.c $
+ * $Revision: #16 $
+ * $Date: 2006/12/05 $
+ * $Change: 762293 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef DWC_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the implementation of the HCD. In Linux, the HCD
+ * implements the hc_driver API.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+extern atomic_t release_later;
+
+static u64 dma_mask = DMA_BIT_MASK(32);
+
+static const char dwc_otg_hcd_name[] = "dwc_otg_hcd";
+
+static int dwc_otg_hcd_suspend(struct usb_hcd *hcd)
+{
+ /* FIXME: Write code to right suspend processing */
+ return 0;
+}
+
+static int dwc_otg_hcd_resume(struct usb_hcd *hcd)
+{
+ /* FIXME: Write code to right resume processing */
+ return 0;
+}
+
+static const struct hc_driver dwc_otg_hc_driver =
+{
+ .description = dwc_otg_hcd_name,
+ .product_desc = "DWC OTG Controller",
+ .hcd_priv_size = sizeof(dwc_otg_hcd_t),
+ .irq = dwc_otg_hcd_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+ //.reset =
+ .start = dwc_otg_hcd_start,
+#ifdef CONFIG_PM
+ .bus_suspend = dwc_otg_hcd_suspend,
+ .bus_resume = dwc_otg_hcd_resume,
+#endif
+ .stop = dwc_otg_hcd_stop,
+ .urb_enqueue = dwc_otg_hcd_urb_enqueue,
+ .urb_dequeue = dwc_otg_hcd_urb_dequeue,
+ .endpoint_disable = dwc_otg_hcd_endpoint_disable,
+ .get_frame_number = dwc_otg_hcd_get_frame_number,
+ .hub_status_data = dwc_otg_hcd_hub_status_data,
+ .hub_control = dwc_otg_hcd_hub_control,
+ //.hub_suspend =
+ //.hub_resume =
+};
+
+
+/**
+ * Work queue function for starting the HCD when A-Cable is connected.
+ * The dwc_otg_hcd_start() must be called in a process context.
+ */
+static void hcd_start_func(struct work_struct *work)
+{
+ struct dwc_otg_hcd *priv =
+ container_of(work, struct dwc_otg_hcd, start_work);
+ struct usb_hcd *usb_hcd = (struct usb_hcd *)priv->_p;
+ DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd);
+ if (usb_hcd) {
+ dwc_otg_hcd_start(usb_hcd);
+ }
+}
+
+
+/**
+ * HCD Callback function for starting the HCD when A-Cable is
+ * connected.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_start_cb(void *_p)
+{
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p);
+ dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if;
+ hprt0_data_t hprt0;
+ if (core_if->op_state == B_HOST) {
+ /*
+ * Reset the port. During a HNP mode switch the reset
+ * needs to occur within 1ms and have a duration of at
+ * least 50ms.
+ */
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ ((struct usb_hcd *)_p)->self.is_b_host = 1;
+ } else {
+ ((struct usb_hcd *)_p)->self.is_b_host = 0;
+ }
+ /* Need to start the HCD in a non-interrupt context. */
+ INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
+ dwc_otg_hcd->_p = _p;
+ schedule_work(&dwc_otg_hcd->start_work);
+ return 1;
+}
+
+
+/**
+ * HCD Callback function for stopping the HCD.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_stop_cb(void *_p)
+{
+ struct usb_hcd *usb_hcd = (struct usb_hcd *)_p;
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+ dwc_otg_hcd_stop(usb_hcd);
+ return 1;
+}
+static void del_xfer_timers(dwc_otg_hcd_t * _hcd)
+{
+
+#ifdef DEBUG
+ int i;
+ int num_channels = _hcd->core_if->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ del_timer(&_hcd->core_if->hc_xfer_timer[i]);
+ }
+
+#endif /* */
+}
+static void del_timers(dwc_otg_hcd_t * _hcd)
+{
+ del_xfer_timers(_hcd);
+ del_timer(&_hcd->conn_timer);
+}
+
+/**
+ * Processes all the URBs in a single list of QHs. Completes them with
+ * -ETIMEDOUT and frees the QTD.
+ */
+static void kill_urbs_in_qh_list(dwc_otg_hcd_t * _hcd,
+ struct list_head *_qh_list)
+{
+ struct list_head *qh_item;
+ dwc_otg_qh_t * qh;
+ struct list_head *qtd_item;
+ dwc_otg_qtd_t * qtd;
+ list_for_each(qh_item, _qh_list) {
+ qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry);
+ for (qtd_item = qh->qtd_list.next; qtd_item != &qh->qtd_list;
+ qtd_item = qh->qtd_list.next) {
+ qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry);
+ if (qtd->urb != NULL) {
+ dwc_otg_hcd_complete_urb(_hcd, qtd->urb,-ETIMEDOUT);
+ }
+ dwc_otg_hcd_qtd_remove_and_free(qtd);
+ }
+ }
+}
+
+/**
+ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
+ * and periodic schedules. The QTD associated with each URB is removed from
+ * the schedule and freed. This function may be called when a disconnect is
+ * detected or when the HCD is being stopped.
+ */
+static void kill_all_urbs(dwc_otg_hcd_t * _hcd)
+{
+ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_deferred);
+ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_inactive);
+ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_active);
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_inactive);
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_ready);
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_assigned);
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_queued);
+}
+
+/**
+ * HCD Callback function for disconnect of the HCD.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_disconnect_cb(void *_p)
+{
+ gintsts_data_t intr;
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p);
+
+ //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+
+ /*
+ * Set status flags for the hub driver.
+ */
+ dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+ dwc_otg_hcd->flags.b.port_connect_status = 0;
+
+ /*
+ * Shutdown any transfers in process by clearing the Tx FIFO Empty
+ * interrupt mask and status bits and disabling subsequent host
+ * channel interrupts.
+ */
+ intr.d32 = 0;
+ intr.b.nptxfempty = 1;
+ intr.b.ptxfempty = 1;
+ intr.b.hcintr = 1;
+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk,
+ intr.d32, 0);
+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts,
+ intr.d32, 0);
+ del_timers(dwc_otg_hcd);
+
+ /*
+ * Turn off the vbus power only if the core has transitioned to device
+ * mode. If still in host mode, need to keep power on to detect a
+ * reconnection.
+ */
+ if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) {
+ if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) {
+ hprt0_data_t hprt0 = {.d32 = 0};
+ DWC_PRINT("Disconnect: PortPower off\n");
+ hprt0.b.prtpwr = 0;
+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0,
+ hprt0.d32);
+ }
+ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
+ }
+
+ /* Respond with an error status to all URBs in the schedule. */
+ kill_all_urbs(dwc_otg_hcd);
+ if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
+ /* Clean up any host channels that were in use. */
+ int num_channels;
+ int i;
+ dwc_hc_t * channel;
+ dwc_otg_hc_regs_t * hc_regs;
+ hcchar_data_t hcchar;
+ num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
+ if (!dwc_otg_hcd->core_if->dma_enable) {
+ /* Flush out any channel requests in slave mode. */
+ for (i = 0; i < num_channels; i++) {
+ channel = dwc_otg_hcd->hc_ptr_array[i];
+ if (list_empty(&channel->hc_list_entry)) {
+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ hcchar.b.chen = 0;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ dwc_write_reg32(&hc_regs->hcchar,hcchar.d32);
+ }
+ }
+ }
+ }
+ for (i = 0; i < num_channels; i++) {
+ channel = dwc_otg_hcd->hc_ptr_array[i];
+ if (list_empty(&channel->hc_list_entry)) {
+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ /* Halt the channel. */
+ hcchar.b.chdis = 1;
+ dwc_write_reg32(&hc_regs->hcchar,hcchar.d32);
+ }
+ dwc_otg_hc_cleanup(dwc_otg_hcd->core_if,channel);
+ list_add_tail(&channel->hc_list_entry,
+ &dwc_otg_hcd->free_hc_list);
+ }
+ }
+ }
+
+ /* A disconnect will end the session so the B-Device is no
+ * longer a B-host. */
+ ((struct usb_hcd *)_p)->self.is_b_host = 0;
+ return 1;
+}
+
+/**
+ * Connection timeout function. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds.
+ */
+void dwc_otg_hcd_connect_timeout(unsigned long _ptr)
+{
+ DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)_ptr);
+ DWC_PRINT("Connect Timeout\n");
+ DWC_ERROR("Device Not Connected/Responding\n");
+}
+
+/**
+ * Start the connection timer. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds. The
+ * timer is deleted if a port connect interrupt occurs before the
+ * timer expires.
+ */
+static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t * _hcd)
+{
+ init_timer(&_hcd->conn_timer);
+ _hcd->conn_timer.function = dwc_otg_hcd_connect_timeout;
+ _hcd->conn_timer.data = (unsigned long)0;
+ _hcd->conn_timer.expires = jiffies + (HZ * 10);
+ add_timer(&_hcd->conn_timer);
+}
+
+/**
+ * HCD Callback function for disconnect of the HCD.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_session_start_cb(void *_p)
+{
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p);
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+ dwc_otg_hcd_start_connect_timer(dwc_otg_hcd);
+ return 1;
+}
+
+
+/**
+ * HCD Callback structure for handling mode switching.
+ */
+static dwc_otg_cil_callbacks_t hcd_cil_callbacks =
+{
+ .start = dwc_otg_hcd_start_cb,
+ .stop = dwc_otg_hcd_stop_cb,
+ .disconnect = dwc_otg_hcd_disconnect_cb,
+ .session_start = dwc_otg_hcd_session_start_cb,
+ .p = 0,
+};
+
+
+/**
+ * Reset tasklet function
+ */
+static void reset_tasklet_func(unsigned long data)
+{
+ dwc_otg_hcd_t * dwc_otg_hcd = (dwc_otg_hcd_t *) data;
+ dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if;
+ hprt0_data_t hprt0;
+ DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ mdelay(60);
+ hprt0.b.prtrst = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ dwc_otg_hcd->flags.b.port_reset_change = 1;
+ return;
+}
+static struct tasklet_struct reset_tasklet =
+{
+ .next = NULL,
+ .state = 0,
+ .count = ATOMIC_INIT(0),
+ .func = reset_tasklet_func,
+ .data = 0,
+};
+
+
+#ifdef OTG_PLB_DMA_TASKLET
+/**
+ * plbdma tasklet function
+ */
+static void plbdma_tasklet_func(unsigned long data)
+{
+ unsigned long flags;
+ dwc_otg_core_if_t * _core_if = (dwc_otg_core_if_t *) data;
+ dma_xfer_t * dma_xfer = &_core_if->dma_xfer;
+
+ local_irq_save(flags);
+ DWC_DEBUGPL(DBG_SP, "Plbdma tasklet called\n");
+
+ if (_core_if->dma_xfer.dma_dir == OTG_TX_DMA) {
+ if ((((unsigned long)dma_xfer->dma_data_buff) & 0x3) == 0) {
+ /* call tx_dma - src,dest,len,intr */
+ ppc4xx_start_plb_dma(_core_if, (void *)dma_xfer->dma_data_buff,
+ dma_xfer->dma_data_fifo, (dma_xfer->dma_count * 4), PLB_DMA_INT_ENA,
+ PLB_DMA_CH, OTG_TX_DMA);
+ } else {
+ ppc4xx_start_plb_dma(_core_if, (void *)get_unaligned(dma_xfer->dma_data_buff),
+ dma_xfer->dma_data_fifo, (dma_xfer->dma_count * 4), PLB_DMA_INT_ENA,
+ PLB_DMA_CH, OTG_TX_DMA);
+ }
+ }
+ else {
+ DWC_DEBUGPL(DBG_HCD, "0x%p 0x%p %d\n", (void *)dma_xfer->dma_data_fifo,
+ dma_xfer->dma_data_buff, dma_xfer->dma_count );
+
+ ppc4xx_start_plb_dma(_core_if, (void *)dma_xfer->dma_data_fifo,
+ dma_xfer->dma_data_buff, (dma_xfer->dma_count * 4), PLB_DMA_INT_ENA,
+ PLB_DMA_CH, OTG_RX_DMA);
+ }
+
+ local_irq_restore(flags);
+ return;
+}
+static struct tasklet_struct plbdma_tasklet =
+{
+ .next = NULL,
+ .state = 0,
+ .count = ATOMIC_INIT(0),
+ .func = plbdma_tasklet_func,
+ .data = 0,
+};
+
+#endif
+
+/**
+ * Initializes the HCD. This function allocates memory for and initializes the
+ * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the
+ * USB bus with the core and calls the hc_driver->start() function. It returns
+ * a negative error on failure.
+ */
+int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_device)
+{
+ struct usb_hcd *hcd = NULL;
+ dwc_otg_hcd_t * dwc_otg_hcd = NULL;
+ dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev);
+ int num_channels;
+ int i;
+ dwc_hc_t * channel;
+ int retval = 0;
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
+ /*
+ * Allocate memory for the base HCD plus the DWC OTG HCD.
+ * Initialize the base HCD.
+ */
+ hcd = usb_create_hcd(&dwc_otg_hc_driver, _dev, dev_name(_dev));
+ if (hcd == NULL) {
+ retval = -ENOMEM;
+ goto error1;
+ }
+ dev_set_drvdata(_dev, dwc_otg_device); /* fscz restore */
+ hcd->regs = otg_dev->base;
+ hcd->self.otg_port = 1;
+
+ /* Initialize the DWC OTG HCD. */
+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+ dwc_otg_hcd->core_if = otg_dev->core_if;
+ otg_dev->hcd = dwc_otg_hcd;
+ /* Register the HCD CIL Callbacks */
+ dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if,
+ &hcd_cil_callbacks, hcd);
+
+ /* Initialize the non-periodic schedule. */
+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive);
+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active);
+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_deferred);
+
+ /* Initialize the periodic schedule. */
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
+
+ /*
+ * Create a host channel descriptor for each host channel implemented
+ * in the controller. Initialize the channel descriptor array.
+ */
+ INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list);
+ num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL);
+ if (channel == NULL) {
+ retval = -ENOMEM;
+ DWC_ERROR("%s: host channel allocation failed\n",__func__);
+ goto error2;
+ }
+ memset(channel, 0, sizeof(dwc_hc_t));
+ channel->hc_num = i;
+ dwc_otg_hcd->hc_ptr_array[i] = channel;
+
+#ifdef DEBUG
+ init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]);
+#endif /* */
+ DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i,channel);
+ }
+
+ /* Initialize the Connection timeout timer. */
+ init_timer(&dwc_otg_hcd->conn_timer);
+
+ /* Initialize reset tasklet. */
+ reset_tasklet.data = (unsigned long)dwc_otg_hcd;
+ dwc_otg_hcd->reset_tasklet = &reset_tasklet;
+
+#ifdef OTG_PLB_DMA_TASKLET
+ /* Initialize plbdma tasklet. */
+ plbdma_tasklet.data = (unsigned long)dwc_otg_hcd->core_if;
+ dwc_otg_hcd->core_if->plbdma_tasklet = &plbdma_tasklet;
+#endif
+
+ /* Set device flags indicating whether the HCD supports DMA. */
+ if (otg_dev->core_if->dma_enable) {
+ DWC_PRINT("Using DMA mode\n");
+ _dev->dma_mask = &dma_mask;
+ _dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ } else {
+ DWC_PRINT("Using Slave mode\n");
+ _dev->dma_mask = (void *)0;
+ _dev->coherent_dma_mask = 0;
+ }
+ /*
+ * Finish generic HCD initialization and start the HCD. This function
+ * allocates the DMA buffer pool, registers the USB bus, requests the
+ * IRQ line, and calls dwc_otg_hcd_start method.
+ */
+ retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED);
+ if (retval < 0) {
+ goto error2;
+ }
+ /*
+ * Allocate space for storing data on status transactions. Normally no
+ * data is sent, but this space acts as a bit bucket. This must be
+ * done after usb_add_hcd since that function allocates the DMA buffer
+ * pool.
+ */
+ if (otg_dev->core_if->dma_enable) {
+ dwc_otg_hcd->status_buf =
+ dma_alloc_coherent(_dev, DWC_OTG_HCD_STATUS_BUF_SIZE,
+ &dwc_otg_hcd->status_buf_dma, GFP_KERNEL | GFP_DMA);
+ } else {
+ dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE, GFP_KERNEL);
+ }
+ if (dwc_otg_hcd->status_buf == NULL) {
+ retval = -ENOMEM;
+ DWC_ERROR("%s: status_buf allocation failed\n", __func__);
+ goto error3;
+ }
+ DWC_DEBUGPL(DBG_HCD,
+ "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n",
+ _dev->bus_id, hcd->self.busnum);
+ return 0;
+
+ /* Error conditions */
+ error3:usb_remove_hcd(hcd);
+ error2:dwc_otg_hcd_free(hcd);
+ usb_put_hcd(hcd);
+ error1:return retval;
+}
+
+
+/**
+ * Removes the HCD.
+ * Frees memory and resources associated with the HCD and deregisters the bus.
+ *
+ * _dev's drvdata must hold the dwc_otg_device_t set up at probe time.
+ * Masks all core interrupts before tearing the HCD down so no IRQ fires
+ * during removal.
+ */
+void dwc_otg_hcd_remove(struct device *_dev)
+{
+	dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev);
+	dwc_otg_hcd_t * dwc_otg_hcd = otg_dev->hcd;
+	struct usb_hcd *hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd);
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n");
+
+	/* Turn off all interrupts */
+	dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0);
+	/* Clears bit 0 of GAHBCFG — presumably the global interrupt enable
+	 * bit; confirm against the DWC OTG register spec. */
+	dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1,0);
+	usb_remove_hcd(hcd);
+	dwc_otg_hcd_free(hcd);
+	usb_put_hcd(hcd);
+	return;
+}
+
+
+/* =========================================================================
+ * Linux HC Driver Functions
+ * ========================================================================= */
+
+/**
+ * Initializes dynamic portions of the DWC_otg HCD state.
+ *
+ * Resets the status flags and channel counters, returns every host
+ * channel to the free list (cleaning each channel's state), then re-runs
+ * host-mode core initialization. Called from dwc_otg_hcd_start().
+ */
+static void hcd_reinit(dwc_otg_hcd_t * _hcd)
+{
+	struct list_head *item;
+	int num_channels;
+	int i;
+	dwc_hc_t * channel;
+	_hcd->flags.d32 = 0;
+	/* Round-robin pointer for non-periodic scheduling starts at the
+	 * active list head. */
+	_hcd->non_periodic_qh_ptr = &_hcd->non_periodic_sched_active;
+	_hcd->non_periodic_channels = 0;
+	_hcd->periodic_channels = 0;
+
+	/*
+	 * Put all channels in the free channel list and clean up channel
+	 * states.
+	 */
+	/* Drain the free list first so no channel ends up double-linked
+	 * when it is re-added below. */
+	item = _hcd->free_hc_list.next;
+	while (item != &_hcd->free_hc_list) {
+		list_del(item);
+		item = _hcd->free_hc_list.next;
+	}
+	num_channels = _hcd->core_if->core_params->host_channels;
+	for (i = 0; i < num_channels; i++) {
+		channel = _hcd->hc_ptr_array[i];
+		list_add_tail(&channel->hc_list_entry, &_hcd->free_hc_list);
+		dwc_otg_hc_cleanup(_hcd->core_if, channel);
+	}
+
+	/* Initialize the DWC core for host mode operation. */
+	dwc_otg_core_host_init(_hcd->core_if);
+}
+
+
+/** Initializes the DWC_otg controller and its root hub and prepares it for host
+ * mode operation. Activates the root port. Returns 0 on success and a negative
+ * error code on failure.
+ *
+ * If a root hub is already attached, the hub driver is simply asked to
+ * resume; otherwise a placeholder usb_device is allocated for the bus.
+ */
+int dwc_otg_hcd_start(struct usb_hcd *_hcd)
+{
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	struct usb_device *udev;
+	struct usb_bus *bus;
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n");
+	bus = hcd_to_bus(_hcd);
+
+	/* Initialize the bus state. */
+	_hcd->state = HC_STATE_RUNNING;
+
+	/* Initialize and connect root hub if one is not already attached */
+	if (bus->root_hub) {
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n");
+
+		/* Inform the HUB driver to resume. */
+		usb_hcd_resume_root_hub(_hcd);
+	} else {
+		udev = usb_alloc_dev(NULL, bus, 0);
+		/* BUG FIX: check the allocation BEFORE touching udev; the
+		 * original code wrote udev->speed first, which oopses on a
+		 * NULL return from usb_alloc_dev(). */
+		if (!udev) {
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n");
+			return -ENODEV;
+		}
+		udev->speed = USB_SPEED_HIGH;
+		/* NOTE(review): udev is neither registered as root hub nor
+		 * released here — behavior retained from the original code;
+		 * root hub registration was removed intentionally upstream. */
+	}
+	hcd_reinit(dwc_otg_hcd);
+	return 0;
+}
+/**
+ * Kills any remaining URBs on the QHs in _qh_list, then removes and frees
+ * every QH. Safe to call on a list that was never initialized
+ * (next == NULL), in which case it returns immediately.
+ */
+static void qh_list_free(dwc_otg_hcd_t * _hcd, struct list_head *_qh_list)
+{
+	struct list_head *item;
+	dwc_otg_qh_t * qh;
+	if (_qh_list->next == NULL) {
+		/* The list hasn't been initialized yet. */
+		return;
+	}
+
+	/* Ensure there are no QTDs or URBs left. */
+	kill_urbs_in_qh_list(_hcd, _qh_list);
+	/* Re-read the list head each iteration: the current entry is
+	 * unlinked and freed inside the loop body. */
+	for (item = _qh_list->next; item != _qh_list; item = _qh_list->next) {
+		qh = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+		dwc_otg_hcd_qh_remove_and_free(_hcd, qh);
+	}
+}
+
+
+/**
+ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ *
+ * Masks host-specific interrupts and drops VBUS port power via HPRT0.
+ */
+void dwc_otg_hcd_stop(struct usb_hcd *_hcd)
+{
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	hprt0_data_t hprt0 = {.d32 = 0};
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n");
+
+	/* Turn off all host-specific interrupts. */
+	dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
+
+	/*
+	 * The root hub should be disconnected before this function is called.
+	 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
+	 * and the QH lists (via ..._hcd_endpoint_disable).
+	 */
+
+	/* Turn off the vbus power */
+	DWC_PRINT("PortPower off\n");
+	hprt0.b.prtpwr = 0;
+	dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
+	return;
+}
+
+
+/** Returns the current frame number.
+ *
+ * Reads the frame number field from the host frame number register
+ * (HFNUM).
+ */
+int dwc_otg_hcd_get_frame_number(struct usb_hcd *_hcd)
+{
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	hfnum_data_t hfnum;
+	hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->host_if->
+				   host_global_regs->hfnum);
+
+#ifdef DEBUG_SOF
+	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n",
+		    hfnum.b.frnum);
+#endif /* */
+	return hfnum.b.frnum;
+}
+
+
+/**
+ * Frees secondary storage associated with the dwc_otg_hcd structure contained
+ * in the struct usb_hcd field.
+ *
+ * Stops timers, frees all QH/QTD schedule lists, the per-channel
+ * descriptors, and the status bit-bucket buffer (DMA-coherent in DMA
+ * mode, kmalloc'd in slave mode — matching how it was allocated).
+ */
+void dwc_otg_hcd_free(struct usb_hcd *_hcd)
+{
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	int i;
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n");
+	del_timers(dwc_otg_hcd);
+
+	/* Free memory for QH/QTD lists */
+	qh_list_free(dwc_otg_hcd,
+		     &dwc_otg_hcd->non_periodic_sched_inactive);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_deferred);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
+
+	/* Free memory for the host channels. */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		dwc_hc_t * hc = dwc_otg_hcd->hc_ptr_array[i];
+		if (hc != NULL) {
+			DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n",i, hc);
+			kfree(hc);
+		}
+	}
+	if (dwc_otg_hcd->core_if->dma_enable) {
+		if (dwc_otg_hcd->status_buf_dma) {
+			dma_free_coherent(_hcd->self.controller,
+					  DWC_OTG_HCD_STATUS_BUF_SIZE,
+					  dwc_otg_hcd->status_buf,
+					  dwc_otg_hcd->status_buf_dma);
+		}
+	} else if (dwc_otg_hcd->status_buf != NULL) {
+		kfree(dwc_otg_hcd->status_buf);
+	}
+	return;
+}
+
+
+#ifdef DEBUG
+/**
+ * Debug-only helper: prints every interesting field of an URB (address,
+ * endpoint, pipe type, speed, buffer pointers, and per-packet ISO
+ * descriptors). Uses GCC statement-expressions to map enum values to
+ * readable names inline.
+ */
+static void dump_urb_info(struct urb *_urb, char *_fn_name)
+{
+	DWC_PRINT("%s, urb %p\n", _fn_name, _urb);
+	DWC_PRINT("  Device address: %d\n", usb_pipedevice(_urb->pipe));
+	DWC_PRINT("  Endpoint: %d, %s\n", usb_pipeendpoint(_urb->pipe),
+		  (usb_pipein(_urb->pipe) ? "IN" : "OUT"));
+	DWC_PRINT("  Endpoint type: %s\n", ( {
+		char *pipetype;
+		switch (usb_pipetype(_urb->pipe)) {
+		case PIPE_CONTROL:
+			pipetype = "CONTROL"; break;
+		case PIPE_BULK:
+			pipetype = "BULK"; break;
+		case PIPE_INTERRUPT:
+			pipetype = "INTERRUPT"; break;
+		case PIPE_ISOCHRONOUS:
+			pipetype = "ISOCHRONOUS"; break;
+		default:
+			pipetype = "UNKNOWN"; break;
+		};
+		pipetype;
+	} )) ;
+	DWC_PRINT("  Speed: %s\n", ( {
+		char *speed;
+		switch (_urb->dev->speed) {
+		case USB_SPEED_HIGH:
+			speed = "HIGH"; break;
+		case USB_SPEED_FULL:
+			speed = "FULL"; break;
+		case USB_SPEED_LOW:
+			speed = "LOW"; break;
+		default:
+			speed = "UNKNOWN"; break;
+		};
+		speed;
+	} )) ;
+	DWC_PRINT("  Max packet size: %d\n",
+		  usb_maxpacket(_urb->dev, _urb->pipe, usb_pipeout(_urb->pipe)));
+	DWC_PRINT("  Data buffer length: %d\n", _urb->transfer_buffer_length);
+	DWC_PRINT("  Transfer buffer: %p, Transfer DMA: %p\n",
+		  _urb->transfer_buffer, (void *)_urb->transfer_dma);
+	DWC_PRINT("  Setup buffer: %p, Setup DMA: %p\n", _urb->setup_packet,
+		  (void *)_urb->setup_dma);
+	DWC_PRINT("  Interval: %d\n", _urb->interval);
+	if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) {
+		int i;
+		for (i = 0; i < _urb->number_of_packets; i++) {
+			DWC_PRINT("  ISO Desc %d:\n", i);
+			DWC_PRINT("    offset: %d, length %d\n",
+				  _urb->iso_frame_desc[i].offset,
+				  _urb->iso_frame_desc[i].length);
+		}
+	}
+}
+/**
+ * Debug-only helper: dumps the state of the host channel assigned to a
+ * QH, the three non-periodic schedule lists, and the channel pointer
+ * array. No-op when the QH has no channel assigned.
+ */
+static void dump_channel_info(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * qh)
+{
+	if (qh->channel != NULL) {
+		dwc_hc_t * hc = qh->channel;
+		struct list_head *item;
+		dwc_otg_qh_t * qh_item;
+		int num_channels = _hcd->core_if->core_params->host_channels;
+		int i;
+		dwc_otg_hc_regs_t * hc_regs;
+		hcchar_data_t hcchar;
+		hcsplt_data_t hcsplt;
+		hctsiz_data_t hctsiz;
+		uint32_t hcdma;
+		hc_regs = _hcd->core_if->host_if->hc_regs[hc->hc_num];
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+		hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
+		hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+		hcdma = dwc_read_reg32(&hc_regs->hcdma);
+		DWC_PRINT("  Assigned to channel %p:\n", hc);
+		DWC_PRINT("    hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+		DWC_PRINT("    hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
+		DWC_PRINT("    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+			  hc->dev_addr, hc->ep_num, hc->ep_is_in);
+		DWC_PRINT("    ep_type: %d\n", hc->ep_type);
+		DWC_PRINT("    max_packet: %d\n", hc->max_packet);
+		DWC_PRINT("    data_pid_start: %d\n", hc->data_pid_start);
+		DWC_PRINT("    xfer_started: %d\n", hc->xfer_started);
+		DWC_PRINT("    halt_status: %d\n", hc->halt_status);
+		DWC_PRINT("    xfer_buff: %p\n", hc->xfer_buff);
+		DWC_PRINT("    xfer_len: %d\n", hc->xfer_len);
+		DWC_PRINT("    qh: %p\n", hc->qh);
+		DWC_PRINT("  NP inactive sched:\n");
+		list_for_each(item, &_hcd->non_periodic_sched_inactive) {
+			qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+			DWC_PRINT("    %p\n", qh_item);
+		}
+		/* BUG FIX: the original printed the deferred list under the
+		 * "NP active sched" heading and the active list under the
+		 * "NP deferred sched" heading; each heading now matches the
+		 * list it iterates. */
+		DWC_PRINT("  NP active sched:\n");
+		list_for_each(item, &_hcd->non_periodic_sched_active) {
+			qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+			DWC_PRINT("    %p\n", qh_item);
+		}
+		DWC_PRINT("  NP deferred sched:\n");
+		list_for_each(item, &_hcd->non_periodic_sched_deferred) {
+			qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+			DWC_PRINT("    %p\n", qh_item);
+		}
+		DWC_PRINT("  Channels: \n");
+		/* Renamed from 'hc' to avoid shadowing the outer channel
+		 * pointer. */
+		for (i = 0; i < num_channels; i++) {
+			dwc_hc_t * ch = _hcd->hc_ptr_array[i];
+			DWC_PRINT("    %2d: %p\n", i, ch);
+		}
+	}
+}
+
+#endif /* */
+
+/** Starts processing a USB transfer request specified by a USB Request Block
+ * (URB). mem_flags indicates the type of memory allocation to use while
+ * processing this URB.
+ *
+ * Returns 0 on success or a negative errno. BUG FIX: once
+ * usb_hcd_link_urb_to_ep() has succeeded, every failure path must call
+ * usb_hcd_unlink_urb_from_ep() — the original returned with the URB
+ * still linked to its endpoint on the -ENODEV/-ENOMEM/qtd-add-failure
+ * paths, corrupting the HCD core's endpoint bookkeeping.
+ */
+int dwc_otg_hcd_urb_enqueue(struct usb_hcd *_hcd,
+			    struct urb *_urb,
+			    gfp_t _mem_flags)
+{
+	unsigned long flags;
+	int retval;
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	dwc_otg_qtd_t * qtd;
+
+	local_irq_save(flags);
+	retval = usb_hcd_link_urb_to_ep(_hcd, _urb);
+	if (retval) {
+		local_irq_restore(flags);
+		return retval;
+	}
+#ifdef DEBUG
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		dump_urb_info(_urb, "dwc_otg_hcd_urb_enqueue");
+	}
+#endif /* */
+	if (!dwc_otg_hcd->flags.b.port_connect_status) {
+		/* No longer connected; undo the link done above. */
+		usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+		local_irq_restore(flags);
+		return -ENODEV;
+	}
+	qtd = dwc_otg_hcd_qtd_create(_urb);
+	if (qtd == NULL) {
+		usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+		local_irq_restore(flags);
+		DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
+		return -ENOMEM;
+	}
+	retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd);
+	if (retval < 0) {
+		DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
+			  "Error status %d\n", retval);
+		dwc_otg_hcd_qtd_free(qtd);
+		usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+	}
+	local_irq_restore(flags);
+	return retval;
+}
+
+
+/** Aborts/cancels a USB transfer request. Always returns 0 to indicate
+ * success.
+ *
+ * Runs with local IRQs disabled. If the URB's QTD is currently assigned
+ * to a host channel and the port is still connected, the channel is
+ * halted so it can be reused; the QTD is then freed and the QH either
+ * deactivated or removed from the schedule. Finally the URB is given
+ * back to the USB core with the requested status.
+ */
+int dwc_otg_hcd_urb_dequeue(struct usb_hcd *_hcd, struct urb *_urb, int _status)
+{
+	unsigned long flags;
+	dwc_otg_hcd_t * dwc_otg_hcd;
+	dwc_otg_qtd_t * urb_qtd;
+	dwc_otg_qh_t * qh;
+	int retval;
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
+	local_irq_save(flags);
+	retval = usb_hcd_check_unlink_urb(_hcd, _urb, _status);
+	if (retval) {
+		local_irq_restore(flags);
+		return retval;
+	}
+
+	dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+
+	urb_qtd = (dwc_otg_qtd_t *) _urb->hcpriv;
+	if (urb_qtd == NULL) {
+		printk("urb_qtd is NULL for _urb %08x\n",(unsigned)_urb);
+		goto done;
+	}
+	qh = (dwc_otg_qh_t *) urb_qtd->qtd_qh_ptr;
+	if (qh == NULL) {
+		goto done;
+	}
+#ifdef DEBUG
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		dump_urb_info(_urb, "dwc_otg_hcd_urb_dequeue");
+		if (urb_qtd == qh->qtd_in_process) {
+			dump_channel_info(dwc_otg_hcd, qh);
+		}
+	}
+
+#endif /* */
+	if (urb_qtd == qh->qtd_in_process) {
+		/* The QTD is in process (it has been assigned to a channel). */
+		if (dwc_otg_hcd->flags.b.port_connect_status) {
+
+			/*
+			 * If still connected (i.e. in host mode), halt the
+			 * channel so it can be used for other transfers. If
+			 * no longer connected, the host registers can't be
+			 * written to halt the channel since the core is in
+			 * device mode.
+			 */
+			dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel,
+					DWC_OTG_HC_XFER_URB_DEQUEUE);
+		}
+	}
+
+	/*
+	 * Free the QTD and clean up the associated QH. Leave the QH in the
+	 * schedule if it has any remaining QTDs.
+	 */
+	/* NOTE(review): urb_qtd is freed by the call below, so the
+	 * comparison that follows uses a dangling pointer value. It is only
+	 * compared (never dereferenced), but this is fragile — consider
+	 * latching the result before the free. */
+	dwc_otg_hcd_qtd_remove_and_free(urb_qtd);
+	if (urb_qtd == qh->qtd_in_process) {
+		dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0);
+		qh->channel = NULL;
+		qh->qtd_in_process = NULL;
+	} else if (list_empty(&qh->qtd_list)) {
+		dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
+	}
+done:
+	local_irq_restore(flags);
+	_urb->hcpriv = NULL;
+
+	/* Higher layer software sets URB status. */
+#if 1 /* Fixed bug relate kernel hung when unplug cable */
+	usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+	usb_hcd_giveback_urb(_hcd, _urb, _status);
+	if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+		DWC_PRINT("Called usb_hcd_giveback_urb()\n");
+		DWC_PRINT("  urb->status = %d\n", _status);
+	}
+#else
+	if (_status != -ECONNRESET) {
+		usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+		usb_hcd_giveback_urb(_hcd, _urb, _status);
+		if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+			DWC_PRINT("Called usb_hcd_giveback_urb()\n");
+			DWC_PRINT("  urb->status = %d\n", _status);
+		}
+	}
+#endif
+	return 0;
+}
+
+
+/** Frees resources in the DWC_otg controller related to a given endpoint. Also
+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint
+ * must already be dequeued.
+ *
+ * The endpoint's QH (stashed in _ep->hcpriv) is removed from the
+ * schedule and freed; _ep->hcpriv is cleared so the endpoint can be
+ * re-enabled later.
+ */
+void dwc_otg_hcd_endpoint_disable(struct usb_hcd *_hcd,
+				  struct usb_host_endpoint *_ep)
+{
+	dwc_otg_qh_t * qh;
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, "
+		    "endpoint=%d\n", _ep->desc.bEndpointAddress,
+		    dwc_ep_addr_to_endpoint(_ep->desc.bEndpointAddress));
+	qh = (dwc_otg_qh_t *) (_ep->hcpriv);
+	if (qh != NULL) {
+
+#ifdef DEBUG
+		/** Check that the QTD list is really empty */
+		if (!list_empty(&qh->qtd_list)) {
+			DWC_WARN("DWC OTG HCD EP DISABLE:"
+				 " QTD List for this endpoint is not empty\n");
+		}
+
+#endif /* */
+		dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh);
+		_ep->hcpriv = NULL;
+	}
+	return;
+}
+
+extern int fscz_debug;
+/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
+ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
+ * interrupt.
+ *
+ * This function is called by the USB core when an interrupt occurs.
+ * Thin wrapper: all real work happens in dwc_otg_hcd_handle_intr(). */
+irqreturn_t dwc_otg_hcd_irq(struct usb_hcd * _hcd)
+{
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	return IRQ_RETVAL(dwc_otg_hcd_handle_intr(dwc_otg_hcd));
+}
+
+/** Creates Status Change bitmap for the root hub and root port. The bitmap is
+ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
+ * is the status change indicator for the single root port. Returns 1 if either
+ * change indicator is 1, otherwise returns 0.
+ *
+ * Only bit 1 is ever set here (this controller has a single root port
+ * and no hub-level changes are reported).
+ */
+int dwc_otg_hcd_hub_status_data(struct usb_hcd *_hcd, char *_buf)
+{
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	_buf[0] = 0;
+	/* OR of all port-change flags, shifted into bit 1 (port number 1). */
+	_buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change
+		    || dwc_otg_hcd->flags.b.port_reset_change
+		    || dwc_otg_hcd->flags.b.port_enable_change
+		    || dwc_otg_hcd->flags.b.port_suspend_change
+		    || dwc_otg_hcd->flags.b.port_over_current_change) << 1;
+
+#ifdef DEBUG
+	if (_buf[0]) {
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:"
+			    " Root port status changed\n");
+		DWC_DEBUGPL(DBG_HCDV, "  port_connect_status_change: %d\n",
+			    dwc_otg_hcd->flags.b.port_connect_status_change);
+		DWC_DEBUGPL(DBG_HCDV, "  port_reset_change: %d\n",
+			    dwc_otg_hcd->flags.b.port_reset_change);
+		DWC_DEBUGPL(DBG_HCDV, "  port_enable_change: %d\n",
+			    dwc_otg_hcd->flags.b.port_enable_change);
+		DWC_DEBUGPL(DBG_HCDV, "  port_suspend_change: %d\n",
+			    dwc_otg_hcd->flags.b.port_suspend_change);
+		DWC_DEBUGPL(DBG_HCDV, "  port_over_current_change: %d\n",
+			    dwc_otg_hcd->flags.b.port_over_current_change);
+	}
+
+#endif /* */
+	return (_buf[0] != 0);
+}
+
+
+#ifdef DWC_HS_ELECT_TST
+/*
+ * Quick and dirty hack to implement the HS Electrical Test
+ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
+ *
+ * This code was copied from our userspace app "hset". It sends a
+ * Get Device Descriptor control sequence in two parts, first the
+ * Setup packet by itself, followed some time later by the In and
+ * Ack packets. Rather than trying to figure out how to add this
+ * functionality to the normal driver code, we just hijack the
+ * hardware, using these two function to drive the hardware
+ * directly.
+ */
+dwc_otg_core_global_regs_t * global_regs;
+dwc_otg_host_global_regs_t * hc_global_regs;
+dwc_otg_hc_regs_t * hc_regs;
+uint32_t * data_fifo;
+
+/*
+ * HS Electrical Test helper: sends the Setup stage of a Get Device
+ * Descriptor request by driving the host channel registers directly.
+ * Busy-waits on GINTSTS for the channel interrupt — test-mode only,
+ * never used during normal operation (see file comment above).
+ */
+static void do_setup(void)
+{
+	gintsts_data_t gintsts;
+	hctsiz_data_t hctsiz;
+	hcchar_data_t hcchar;
+	haint_data_t haint;
+	hcint_data_t hcint;
+
+	/* Enable HAINTs */
+	dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+	/* Enable HCINTs */
+	dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	//fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	//fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	//fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+	/*
+	 * Send Setup packet (Get Device Descriptor)
+	 */
+
+	/* Make sure channel is disabled */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	if (hcchar.b.chen) {
+
+		//fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32);
+		hcchar.b.chdis = 1;
+
+//		hcchar.b.chen = 1;
+		dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+		//sleep(1);
+		mdelay(1000);
+
+		/* Read GINTSTS */
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+		//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+		/* Read HAINT */
+		haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+		//fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+		/* Read HCINT */
+		hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+		//fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+		/* Read HCCHAR */
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		//fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+		/* Clear HCINT */
+		dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+		/* Clear HAINT */
+		dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+		/* Clear GINTSTS */
+		dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		//if (hcchar.b.chen) {
+		//	fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32);
+		//}
+	}
+
+	/* Set HCTSIZ: one 8-byte SETUP packet. */
+	hctsiz.d32 = 0;
+	hctsiz.b.xfersize = 8;
+	hctsiz.b.pktcnt = 1;
+	hctsiz.b.pid = DWC_OTG_HC_PID_SETUP;
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Set HCCHAR: control OUT on EP0, max packet 8, enable channel. */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+	hcchar.b.epdir = 0;
+	hcchar.b.epnum = 0;
+	hcchar.b.mps = 8;
+	hcchar.b.chen = 1;
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+	/* Fill FIFO with Setup data for Get Device Descriptor */
+	data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
+	dwc_write_reg32(data_fifo++, 0x01000680);
+	dwc_write_reg32(data_fifo++, 0x00080000);
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Wait for host channel interrupt */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.hcintr == 0);
+
+	//fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Disable HCINTs */
+	dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+	/* Disable HAINTs */
+	dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	//fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	//fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	//fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+}
+
+/*
+ * HS Electrical Test helper: completes the control transfer started by
+ * do_setup() — receives the IN data stage and then sends the zero-length
+ * OUT handshake (status) stage, driving the host channel registers
+ * directly and busy-waiting on GINTSTS. Test-mode only.
+ */
+static void do_in_ack(void)
+{
+	gintsts_data_t gintsts;
+	hctsiz_data_t hctsiz;
+	hcchar_data_t hcchar;
+	haint_data_t haint;
+	hcint_data_t hcint;
+	host_grxsts_data_t grxsts;
+
+	/* Enable HAINTs */
+	dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+	/* Enable HCINTs */
+	dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	//fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	//fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	//fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+	/*
+	 * Receive Control In packet
+	 */
+
+	/* Make sure channel is disabled */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	if (hcchar.b.chen) {
+		//fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32);
+		hcchar.b.chdis = 1;
+		hcchar.b.chen = 1;
+		dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+		//sleep(1);
+		mdelay(1000);
+
+		/* Read GINTSTS */
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+		//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+		/* Read HAINT */
+		haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+		//fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+		/* Read HCINT */
+		hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+		//fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+		/* Read HCCHAR */
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		//fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+		/* Clear HCINT */
+		dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+		/* Clear HAINT */
+		dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+		/* Clear GINTSTS */
+		dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		//if (hcchar.b.chen) {
+		//	fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32);
+		//}
+	}
+
+	/* Set HCTSIZ: one 8-byte DATA1 IN packet. */
+	hctsiz.d32 = 0;
+	hctsiz.b.xfersize = 8;
+	hctsiz.b.pktcnt = 1;
+	hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Set HCCHAR: control IN on EP0, max packet 8, enable channel. */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+	hcchar.b.epdir = 1;
+	hcchar.b.epnum = 0;
+	hcchar.b.mps = 8;
+	hcchar.b.chen = 1;
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Wait for receive status queue interrupt */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.rxstsqlvl == 0);
+
+	//fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Read RXSTS */
+	grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+
+	//fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+	/* Clear RXSTSQLVL in GINTSTS */
+	gintsts.d32 = 0;
+	gintsts.b.rxstsqlvl = 1;
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+	switch (grxsts.b.pktsts) {
+	case DWC_GRXSTS_PKTSTS_IN:
+		/* Read the data into the host buffer */
+		if (grxsts.b.bcnt > 0) {
+			int i;
+			int word_count = (grxsts.b.bcnt + 3) / 4;
+			data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
+			/* Data is drained from the FIFO and discarded —
+			 * only the transfer itself matters for the test. */
+			for (i = 0; i < word_count; i++) {
+				(void)dwc_read_reg32(data_fifo++);
+			}
+		}
+		//fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt);
+		break;
+	default:
+		//fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n");
+		break;
+	}
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Wait for receive status queue interrupt */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.rxstsqlvl == 0);
+
+	//fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Read RXSTS */
+	grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+
+	//fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+	/* Clear RXSTSQLVL in GINTSTS */
+	gintsts.d32 = 0;
+	gintsts.b.rxstsqlvl = 1;
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+	switch (grxsts.b.pktsts) {
+	case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+		break;
+	default:
+		//fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n");
+		break;
+	}
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Wait for host channel interrupt */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.hcintr == 0);
+
+	//fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	//fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	//fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	//fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+	// usleep(100000);
+	// mdelay(100);
+	mdelay(1);
+
+	/*
+	 * Send handshake packet
+	 */
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	//fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	//fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	//fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+	/* Make sure channel is disabled */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	if (hcchar.b.chen) {
+
+		//fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32);
+		hcchar.b.chdis = 1;
+		hcchar.b.chen = 1;
+		dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+		//sleep(1);
+		mdelay(1000);
+
+		/* Read GINTSTS */
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+		//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+		/* Read HAINT */
+		haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+		//fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+		/* Read HCINT */
+		hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+		//fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+		/* Read HCCHAR */
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		//fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+		/* Clear HCINT */
+		dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+		/* Clear HAINT */
+		dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+		/* Clear GINTSTS */
+		dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+		//if (hcchar.b.chen) {
+		//	fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32);
+		//}
+	}
+
+	/* Set HCTSIZ: zero-length DATA1 status packet. */
+	hctsiz.d32 = 0;
+	hctsiz.b.xfersize = 0;
+	hctsiz.b.pktcnt = 1;
+	hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+	dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+	/* Set HCCHAR: control OUT on EP0, max packet 8, enable channel. */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+	hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+	hcchar.b.epdir = 0;
+	hcchar.b.epnum = 0;
+	hcchar.b.mps = 8;
+	hcchar.b.chen = 1;
+	dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Wait for host channel interrupt */
+	do {
+		gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+	} while (gintsts.b.hcintr == 0);
+
+	//fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
+
+	/* Disable HCINTs */
+	dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+	/* Disable HAINTs */
+	dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+	/* Read HAINT */
+	haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+	//fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+	/* Read HCINT */
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+	//fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+	/* Read HCCHAR */
+	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+	//fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+	/* Clear HCINT */
+	dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+	/* Clear HAINT */
+	dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+	/* Clear GINTSTS */
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/* Read GINTSTS */
+	gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+	//fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+}
+
+#endif /* DWC_HS_ELECT_TST */
+
+/**
+ * Handles hub class-specific requests.
+ *
+ * The DWC OTG core exposes a single-port virtual root hub; this routine
+ * decodes the hub request in @_typeReq and acts directly on the HPRT0
+ * port register (via core_if->host_if->hprt0) and on the driver's
+ * internal port *_change status flags.
+ *
+ * Returns 0 on success, or -EINVAL for an unknown/unsupported request
+ * or an out-of-range port number (_wIndex must be 1 for the one port).
+ */
+int dwc_otg_hcd_hub_control(struct usb_hcd *_hcd, u16 _typeReq, u16 _wValue,
+ u16 _wIndex, char *_buf, u16 _wLength)
+{
+ int retval = 0;
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+ dwc_otg_core_if_t * core_if = hcd_to_dwc_otg_hcd(_hcd)->core_if;
+ struct usb_hub_descriptor *desc;
+ hprt0_data_t hprt0 = {.d32 = 0};
+ uint32_t port_status;
+ switch (_typeReq) {
+ case ClearHubFeature:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearHubFeature 0x%x\n", _wValue);
+ switch (_wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* Nothing required here */
+ break;
+ default:
+ retval = -EINVAL;
+ DWC_ERROR("DWC OTG HCD - ClearHubFeature request %xh unknown\n",
+ _wValue);
+ }
+ break;
+ case ClearPortFeature:
+ if (!_wIndex || _wIndex > 1)
+ goto error;
+ switch (_wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
+ /* Writing 1 to prtena disables the port on this core
+ * (the bit is write-1-to-clear in HPRT0). */
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtena = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+ /* Drive resume signaling on the port ... */
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtres = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+ /* Clear Resume bit */
+ mdelay(100);
+ hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_POWER:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_POWER\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtpwr = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
+
+ /* Port inidicator not supported */
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ /* Clears drivers internal connect status change
+ * flag */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
+ dwc_otg_hcd->flags.b.port_connect_status_change = 0;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* Clears the driver's internal Port Reset Change
+ * flag */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
+ dwc_otg_hcd->flags.b.port_reset_change = 0;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ /* Clears the driver's internal Port
+ * Enable/Disable Change flag */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
+ dwc_otg_hcd->flags.b.port_enable_change = 0;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Clears the driver's internal Port Suspend
+ * Change flag, which is set when resume signaling on
+ * the host port is complete */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
+ dwc_otg_hcd->flags.b.port_suspend_change = 0;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
+ dwc_otg_hcd->flags.b.port_over_current_change = 0;
+ break;
+ default:
+ retval = -EINVAL;
+ DWC_ERROR("DWC OTG HCD - "
+ "ClearPortFeature request %xh "
+ "unknown or unsupported\n", _wValue);
+ }
+ break;
+ case GetHubDescriptor:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "GetHubDescriptor\n");
+ /* Report a one-port hub; 0x29 is the standard USB hub
+ * descriptor type. */
+ desc = (struct usb_hub_descriptor *)_buf;
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ /* NOTE(review): 0x08 in wHubCharacteristics selects per-port
+ * over-current reporting — confirm against USB 2.0 spec
+ * table 11-13 before relying on it. */
+ desc->wHubCharacteristics = 0x08;
+ desc->bPwrOn2PwrGood = 1;
+ desc->bHubContrCurrent = 0;
+ desc->bitmap[0] = 0;
+ desc->bitmap[1] = 0xff;
+ break;
+ case GetHubStatus:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "GetHubStatus\n");
+ /* No hub-level status conditions to report. */
+ memset(_buf, 0, 4);
+ break;
+ case GetPortStatus:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "GetPortStatus\n");
+ if (!_wIndex || _wIndex > 1)
+ goto error;
+ /* Upper 16 bits: change flags tracked by the driver. */
+ port_status = 0;
+ if (dwc_otg_hcd->flags.b.port_connect_status_change)
+ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+ if (dwc_otg_hcd->flags.b.port_enable_change)
+ port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
+ if (dwc_otg_hcd->flags.b.port_suspend_change)
+ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
+ if (dwc_otg_hcd->flags.b.port_reset_change)
+ port_status |= (1 << USB_PORT_FEAT_C_RESET);
+ if (dwc_otg_hcd->flags.b.port_over_current_change) {
+ DWC_ERROR("Device Not Supported\n");
+ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ }
+ if (!dwc_otg_hcd->flags.b.port_connect_status) {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return 0's for the remainder of the port status
+ * since the port register can't be read if the core
+ * is in device mode.
+ */
+ *((__le32 *) _buf) = cpu_to_le32(port_status);
+ break;
+ }
+ /* Lower 16 bits: live state from the HPRT0 register. */
+ hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
+ if (hprt0.b.prtconnsts)
+ port_status |= (1 << USB_PORT_FEAT_CONNECTION);
+ if (hprt0.b.prtena)
+ port_status |= (1 << USB_PORT_FEAT_ENABLE);
+ if (hprt0.b.prtsusp)
+ port_status |= (1 << USB_PORT_FEAT_SUSPEND);
+ if (hprt0.b.prtovrcurract)
+ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
+ if (hprt0.b.prtrst)
+ port_status |= (1 << USB_PORT_FEAT_RESET);
+ if (hprt0.b.prtpwr)
+ port_status |= (1 << USB_PORT_FEAT_POWER);
+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
+ port_status |= (1 << USB_PORT_FEAT_HIGHSPEED);
+
+ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
+ port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
+ if (hprt0.b.prttstctl)
+ port_status |= (1 << USB_PORT_FEAT_TEST);
+
+ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */
+ *((__le32 *) _buf) = cpu_to_le32(port_status);
+ break;
+ case SetHubFeature:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetHubFeature\n");
+
+ /* No HUB features supported */
+ break;
+ case SetPortFeature:
+ /* USB_PORT_FEAT_TEST carries the port in the wIndex LSB and
+ * the test selector in the MSB, so skip the plain port check
+ * for it. */
+ if (_wValue != USB_PORT_FEAT_TEST && (!_wIndex || _wIndex > 1))
+ goto error;
+ if (!dwc_otg_hcd->flags.b.port_connect_status) {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return without doing anything since the port
+ * register can't be written if the core is in device
+ * mode.
+ */
+ break;
+ }
+ switch (_wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
+ if (_hcd->self.otg_port == _wIndex
+ && _hcd->self.b_hnp_enable) {
+ /* Arm HNP before suspending the OTG port. */
+ gotgctl_data_t gotgctl = {.d32 = 0};
+ gotgctl.b.hstsethnpen = 1;
+ dwc_modify_reg32(&core_if->core_global_regs->
+ gotgctl, 0, gotgctl.d32);
+ core_if->op_state = A_SUSPEND;
+ }
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+ //DWC_PRINT( "SUSPEND: HPRT0=%0x\n", hprt0.d32);
+ /* Suspend the Phy Clock */
+ {
+ pcgcctl_data_t pcgcctl = {.d32 = 0};
+ pcgcctl.b.stoppclk = 1;
+ dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32);
+ }
+
+ /* For HNP the bus must be suspended for at least 200ms. */
+ if (_hcd->self.b_hnp_enable) {
+ mdelay(200);
+
+ //DWC_PRINT( "SUSPEND: wait complete! (%d)\n", _hcd->state);
+ }
+ break;
+ case USB_PORT_FEAT_POWER:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_POWER\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtpwr = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_RESET:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_RESET\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+
+ /* When B-Host the Port reset bit is set in
+ * the Start HCD Callback function, so that
+ * the reset is started within 1ms of the HNP
+ * success interrupt. */
+ if (!_hcd->self.is_b_host) {
+ hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ }
+
+ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
+ MDELAY(60);
+ hprt0.b.prtrst = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+
+#ifdef DWC_HS_ELECT_TST
+ case USB_PORT_FEAT_TEST:
+ {
+ uint32_t t;
+ gintmsk_data_t gintmsk;
+ t = (_wIndex >> 8); /* MSB wIndex USB */
+ DWC_DEBUGPL(DBG_HCD,
+ "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_TEST %d\n",
+ t);
+ warn("USB_PORT_FEAT_TEST %d\n", t);
+ if (t < 6) {
+ /* Standard test modes are driven directly by the
+ * PRTTSTCTL field in HPRT0. */
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prttstctl = t;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ } else {
+ /* Setup global vars with reg addresses (quick and
+ * dirty hack, should be cleaned up)
+ */
+ global_regs = core_if->core_global_regs;
+ hc_global_regs = core_if->host_if->host_global_regs;
+ hc_regs = (dwc_otg_hc_regs_t *) ((char *) global_regs + 0x500);
+ data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
+ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */
+ /* Save current interrupt mask */
+ gintmsk.d32 =dwc_read_reg32(&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Drive suspend on the root port */
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 1;
+ hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hprt0,hprt0.d32);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Drive resume on the root port */
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 0;
+ hprt0.b.prtres = 1;
+ dwc_write_reg32(core_if->host_if->hprt0,hprt0.d32);
+ mdelay(100);
+
+ /* Clear the resume bit */
+ hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hprt0,hprt0.d32);
+
+ /* Restore interrupts */
+ dwc_write_reg32(&global_regs->gintmsk,gintmsk.d32);
+ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
+ /* Save current interrupt mask */
+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Send the Setup packet */
+ do_setup();
+
+ /* 15 second delay so nothing else happens for awhile */
+ mdelay(15000);
+
+ /* Restore interrupts */
+ dwc_write_reg32(&global_regs->gintmsk,gintmsk.d32);
+ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
+ /* Save current interrupt mask */
+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* Send the Setup packet */
+ do_setup();
+
+ /* 15 second delay so nothing else happens for awhile */
+ mdelay(15000);
+
+ /* Send the In and Ack packets */
+ do_in_ack();
+
+ /* 15 second delay so nothing else happens for awhile */
+ mdelay(15000);
+
+ /* Restore interrupts */
+ dwc_write_reg32(&global_regs->gintmsk,gintmsk.d32);
+ }
+ }
+ break;
+ }
+
+#endif /* DWC_HS_ELECT_TST */
+ case USB_PORT_FEAT_INDICATOR:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
+ /* Not supported */
+ break;
+ default:
+ retval = -EINVAL;
+ DWC_ERROR("DWC OTG HCD - "
+ "SetPortFeature request %xh "
+ "unknown or unsupported\n", _wValue);
+ break;
+ }
+ break;
+ default:
+ /* Bad port index checks above jump here as well. */
+ error:retval = -EINVAL;
+ DWC_WARN("DWC OTG HCD - "
+ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
+ _typeReq, _wIndex, _wValue);
+ break;
+ }
+ return retval;
+}
+
+
+/**
+ * Assigns transactions from a QTD to a free host channel and initializes the
+ * host channel to perform the transactions. The host channel is removed from
+ * the free list.
+ *
+ * Caller must guarantee that the free list is non-empty and the QH has at
+ * least one queued QTD — both lists are dereferenced without checks here.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _qh Transactions from the first QTD for this QH are selected and
+ * assigned to a free host channel.
+ */
+static void assign_and_init_hc(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ dwc_hc_t * hc;
+ dwc_otg_qtd_t * qtd;
+ struct urb *urb;
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, _hcd, _qh);
+ hc = list_entry(_hcd->free_hc_list.next, dwc_hc_t, hc_list_entry);
+
+ /* Remove the host channel from the free list. */
+ list_del_init(&hc->hc_list_entry);
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+ urb = qtd->urb;
+ _qh->channel = hc;
+ _qh->qtd_in_process = qtd;
+
+ /*
+ * Use usb_pipedevice to determine device address. This address is
+ * 0 before the SET_ADDRESS command and the correct address afterward.
+ */
+ hc->dev_addr = usb_pipedevice(urb->pipe);
+ hc->ep_num = usb_pipeendpoint(urb->pipe);
+ if (urb->dev->speed == USB_SPEED_LOW) {
+ hc->speed = DWC_OTG_EP_SPEED_LOW;
+ } else if (urb->dev->speed == USB_SPEED_FULL) {
+ hc->speed = DWC_OTG_EP_SPEED_FULL;
+ } else {
+ /* Anything else is treated as high speed. */
+ hc->speed = DWC_OTG_EP_SPEED_HIGH;
+ }
+ hc->max_packet = dwc_max_packet(_qh->maxp);
+ hc->xfer_started = 0;
+ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS;
+ hc->error_state = (qtd->error_count > 0);
+ hc->halt_on_queue = 0;
+ hc->halt_pending = 0;
+ hc->requests = 0;
+
+ /*
+ * The following values may be modified in the transfer type section
+ * below. The xfer_len value may be reduced when the transfer is
+ * started to accommodate the max widths of the XferSize and PktCnt
+ * fields in the HCTSIZn register.
+ */
+ hc->do_ping = _qh->ping_state;
+ hc->ep_is_in = (usb_pipein(urb->pipe) != 0);
+ hc->data_pid_start = _qh->data_toggle;
+ hc->multi_count = 1;
+ if (_hcd->core_if->dma_enable) {
+ /* NOTE(review): the DMA address is squeezed through a u32
+ * cast — assumes a 32-bit DMA address space (fine on this
+ * PPC44x target, verify before reuse elsewhere). */
+ hc->xfer_buff =
+ (uint8_t *)(u32)urb->transfer_dma + urb->actual_length;
+ } else {
+ hc->xfer_buff =
+ (uint8_t *) urb->transfer_buffer + urb->actual_length;
+ }
+ hc->xfer_len = urb->transfer_buffer_length - urb->actual_length;
+ hc->xfer_count = 0;
+
+ /*
+ * Set the split attributes
+ */
+ hc->do_split = 0;
+ if (_qh->do_split) {
+ hc->do_split = 1;
+ hc->xact_pos = qtd->isoc_split_pos;
+ hc->complete_split = qtd->complete_split;
+ /* _qh->do_split presumably implies the device sits behind a
+ * high-speed hub TT, so urb->dev->tt is non-NULL — TODO
+ * confirm against the QH setup code. */
+ hc->hub_addr = urb->dev->tt->hub->devnum;
+ hc->port_addr = urb->dev->ttport;
+ }
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL;
+ switch (qtd->control_phase) {
+ case DWC_OTG_CONTROL_SETUP:
+ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n");
+ /* SETUP is always an 8-byte OUT with SETUP PID. */
+ hc->do_ping = 0;
+ hc->ep_is_in = 0;
+ hc->data_pid_start = DWC_OTG_HC_PID_SETUP;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u32)urb->setup_dma;
+ } else {
+ hc->xfer_buff = (uint8_t *) urb->setup_packet;
+ }
+ hc->xfer_len = 8;
+ break;
+ case DWC_OTG_CONTROL_DATA:
+ DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n");
+ hc->data_pid_start = qtd->data_toggle;
+ break;
+ case DWC_OTG_CONTROL_STATUS:
+
+ /*
+ * Direction is opposite of data direction or IN if no
+ * data.
+ */
+ DWC_DEBUGPL(DBG_HCDV,
+ " Control status transaction\n");
+ if (urb->transfer_buffer_length == 0) {
+ hc->ep_is_in = 1;
+ } else {
+ hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN);
+ }
+ if (hc->ep_is_in) {
+ hc->do_ping = 0;
+ }
+ /* Status stage is a zero-length DATA1 packet. */
+ hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
+ hc->xfer_len = 0;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u32)_hcd->status_buf_dma;
+ } else {
+ hc->xfer_buff = (uint8_t *) _hcd->status_buf;
+ }
+ break;
+ }
+ break;
+ case PIPE_BULK:
+ hc->ep_type = DWC_OTG_EP_TYPE_BULK;
+ break;
+ case PIPE_INTERRUPT:
+ hc->ep_type = DWC_OTG_EP_TYPE_INTR;
+ break;
+ case PIPE_ISOCHRONOUS:
+ {
+ struct usb_iso_packet_descriptor *frame_desc;
+ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index];
+ hc->ep_type = DWC_OTG_EP_TYPE_ISOC;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u32)urb->transfer_dma;
+ } else {
+ hc->xfer_buff = (uint8_t *) urb->transfer_buffer;
+ }
+ hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset;
+ hc->xfer_len = frame_desc->length - qtd->isoc_split_offset;
+ if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) {
+ /* 188 bytes is the most that fits in a single
+ * start-split microframe. */
+ if (hc->xfer_len <= 188) {
+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL;
+ } else {
+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN;
+ }
+ }
+ }
+ break;
+ }
+
+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR
+ || hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /*
+ * This value may be modified when the transfer is started to
+ * reflect the actual transfer length.
+ */
+ hc->multi_count = dwc_hb_mult(_qh->maxp);
+ }
+ dwc_otg_hc_init(_hcd->core_if, hc);
+ hc->qh = _qh;
+}
+
+
+/**
+ * This function selects transactions from the HCD transfer schedule and
+ * assigns them to available host channels. It is called from HCD interrupt
+ * handler functions.
+ *
+ * Periodic QHs in the ready list are served first, then deferred
+ * non-periodic QHs (NAKed earlier) whose scheduled frame has arrived are
+ * promoted back to the inactive list, and finally inactive non-periodic
+ * QHs are assigned to any channels not reserved for periodic traffic.
+ *
+ * @param _hcd The HCD state structure.
+ *
+ * @return The types of new transactions that were assigned to host channels.
+ */
+dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd)
+{
+ struct list_head *qh_ptr;
+ dwc_otg_qh_t * qh;
+ int num_channels;
+ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
+
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n");
+#endif /* */
+
+ /* Process entries in the periodic ready list. */
+ qh_ptr = _hcd->periodic_sched_ready.next;
+ while (qh_ptr != &_hcd->periodic_sched_ready
+ && !list_empty(&_hcd->free_hc_list)) {
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ assign_and_init_hc(_hcd, qh);
+ /*
+ * Move the QH from the periodic ready schedule to the
+ * periodic assigned schedule.
+ */
+ qh_ptr = qh_ptr->next;
+ list_move(&qh->qh_list_entry, &_hcd->periodic_sched_assigned);
+ ret_val = DWC_OTG_TRANSACTION_PERIODIC;
+ }
+ /*
+ * Process entries in the deferred portion of the non-periodic list.
+ * A NAK put them here and, at the right time, they need to be
+ * placed on the sched_inactive list.
+ */
+ qh_ptr = _hcd->non_periodic_sched_deferred.next;
+ while (qh_ptr != &_hcd->non_periodic_sched_deferred) {
+ uint16_t frame_number =
+ dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd));
+ unsigned long flags;
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ /* Advance before a possible list_move invalidates qh_ptr. */
+ qh_ptr = qh_ptr->next;
+
+ if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
+ // NAK did this
+ /*
+ * Move the QH from the non periodic deferred schedule to
+ * the non periodic inactive schedule.
+ */
+ local_irq_save(flags);
+ list_move(&qh->qh_list_entry,
+ &_hcd->non_periodic_sched_inactive);
+ local_irq_restore(flags);
+ }
+ }
+
+ /*
+ * Process entries in the inactive portion of the non-periodic
+ * schedule. Some free host channels may not be used if they are
+ * reserved for periodic transfers.
+ */
+ qh_ptr = _hcd->non_periodic_sched_inactive.next;
+ num_channels = _hcd->core_if->core_params->host_channels;
+ while (qh_ptr != &_hcd->non_periodic_sched_inactive &&
+ (_hcd->non_periodic_channels <
+ num_channels - _hcd->periodic_channels)
+ && !list_empty(&_hcd->free_hc_list)) {
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ assign_and_init_hc(_hcd, qh);
+
+ /*
+ * Move the QH from the non-periodic inactive schedule to the
+ * non-periodic active schedule.
+ */
+ qh_ptr = qh_ptr->next;
+ list_move(&qh->qh_list_entry,
+ &_hcd->non_periodic_sched_active);
+ if (ret_val == DWC_OTG_TRANSACTION_NONE) {
+ ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
+ } else {
+ ret_val = DWC_OTG_TRANSACTION_ALL;
+ }
+ _hcd->non_periodic_channels++;
+ }
+ return ret_val;
+}
+
+/**
+ * Attempts to queue a single transaction request for a host channel
+ * associated with either a periodic or non-periodic transfer. This function
+ * assumes that there is space available in the appropriate request queue. For
+ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
+ * is available in the appropriate Tx FIFO.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _hc Host channel descriptor associated with either a periodic or
+ * non-periodic transfer.
+ * @param _fifo_dwords_avail Number of DWORDs available in the periodic Tx
+ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
+ * transfers.
+ *
+ * @return 1 if a request is queued and more requests may be needed to
+ * complete the transfer, 0 if no more requests are required for this
+ * transfer, -1 if there is insufficient space in the Tx FIFO.
+ */
+static int queue_transaction(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, uint16_t _fifo_dwords_avail)
+{
+ int retval;
+ if (_hcd->core_if->dma_enable) {
+ /* In DMA mode the core handles data movement itself; one
+ * start is enough and the FIFO level is irrelevant. */
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ _hc->qh->ping_state = 0;
+ }
+ retval = 0;
+ } else if (_hc->halt_pending) {
+ /* Don't queue a request if the channel has been halted. */
+ retval = 0;
+ } else if (_hc->halt_on_queue) {
+ dwc_otg_hc_halt(_hcd->core_if, _hc, _hc->halt_status);
+ retval = 0;
+ } else if (_hc->do_ping) {
+ /* PING carries no data, so no FIFO space check needed. */
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ }
+ retval = 0;
+ } else if (!_hc->ep_is_in || _hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
+ /* Slave-mode OUT/SETUP: need room for one max-packet in the
+ * Tx FIFO before queueing. */
+ if ((_fifo_dwords_avail * 4) >= _hc->max_packet) {
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ retval = 1;
+ } else {
+ retval = dwc_otg_hc_continue_transfer(_hcd->core_if, _hc);
+ }
+ } else {
+ retval = -1;
+ }
+ } else {
+ /* Slave-mode IN: no Tx FIFO space required. */
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ retval = 1;
+ } else {
+ retval = dwc_otg_hc_continue_transfer(_hcd->core_if, _hc);
+ }
+ }
+ return retval;
+}
+
+
+/**
+ * Processes active non-periodic channels and queues transactions for these
+ * channels to the DWC_otg controller. After queueing transactions, the NP Tx
+ * FIFO Empty interrupt is enabled if there are more transactions to queue as
+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
+ * FIFO Empty interrupt is disabled.
+ *
+ * Channels are served round-robin: _hcd->non_periodic_qh_ptr persists
+ * across calls so each invocation resumes where the last one stopped.
+ */
+static void process_non_periodic_channels(dwc_otg_hcd_t * _hcd)
+{
+ gnptxsts_data_t tx_status;
+ struct list_head *orig_qh_ptr;
+ dwc_otg_qh_t * qh;
+ int status;
+ int no_queue_space = 0;
+ int no_fifo_space = 0;
+ int more_to_do = 0;
+ dwc_otg_core_global_regs_t * global_regs =
+ _hcd->core_if->core_global_regs;
+ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
+
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n",
+ tx_status.b.nptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n",
+ tx_status.b.nptxfspcavail);
+#endif /* */
+ /*
+ * Keep track of the starting point. Skip over the start-of-list
+ * entry.
+ */
+ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_sched_active) {
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ }
+ orig_qh_ptr = _hcd->non_periodic_qh_ptr;
+
+ /*
+ * Process once through the active list or until no more space is
+ * available in the request queue or the Tx FIFO.
+ */
+ do {
+
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ /* Request-queue space only matters in Slave mode. */
+ if (!_hcd->core_if->dma_enable
+ && tx_status.b.nptxqspcavail == 0) {
+ no_queue_space = 1;
+ break;
+ }
+ qh =
+ list_entry(_hcd->non_periodic_qh_ptr, dwc_otg_qh_t,
+ qh_list_entry);
+ status =
+ queue_transaction(_hcd, qh->channel,
+ tx_status.b.nptxfspcavail);
+
+ if (status > 0) {
+ more_to_do = 1;
+ } else if (status < 0) {
+ no_fifo_space = 1;
+ break;
+ }
+#ifdef OTG_PLB_DMA_TASKLET
+ /* Tasklet-driven DMA wants the channel released later;
+ * stop queueing for now. */
+ if (atomic_read(&release_later)) {
+ break;
+ }
+#endif
+
+ /* Advance to next QH, skipping start-of-list entry. */
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_sched_active) {
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ }
+ } while (_hcd->non_periodic_qh_ptr != orig_qh_ptr);
+ if (!_hcd->core_if->dma_enable) {
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ intr_mask.b.nptxfempty = 1;
+
+#ifndef OTG_PLB_DMA_TASKLET
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n",
+ tx_status.b.nptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n",
+ tx_status.b.nptxfspcavail);
+#endif /* */
+#endif
+
+ if (more_to_do || no_queue_space || no_fifo_space) {
+
+ /*
+ * May need to queue more transactions as the request
+ * queue or Tx FIFO empties. Enable the non-periodic
+ * Tx FIFO empty interrupt. (Always use the half-empty
+ * level to ensure that new requests are loaded as
+ * soon as possible.)
+ */
+ dwc_modify_reg32(&global_regs->gintmsk, 0,intr_mask.d32);
+ } else {
+ /*
+ * Disable the Tx FIFO empty interrupt since there are
+ * no more transactions that need to be queued right
+ * now. This function is called from interrupt
+ * handlers to queue more transactions as transfer
+ * states change.
+ */
+ dwc_modify_reg32(&global_regs->gintmsk,intr_mask.d32, 0);
+ }
+ }
+}
+
+/**
+ * Processes periodic channels for the next frame and queues transactions for
+ * these channels to the DWC_otg controller. After queueing transactions, the
+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
+ * to queue as Periodic Tx FIFO or request queue space becomes available.
+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
+ *
+ * Walks the periodic "assigned" schedule, moving fully-queued QHs to the
+ * "queued" schedule; FIFO/queue status comes from HPTXSTS.
+ */
+static void process_periodic_channels(dwc_otg_hcd_t * _hcd)
+{
+ hptxsts_data_t tx_status;
+ struct list_head *qh_ptr;
+ dwc_otg_qh_t * qh;
+ int status;
+ int no_queue_space = 0;
+ int no_fifo_space = 0;
+ dwc_otg_host_global_regs_t * host_regs;
+ host_regs = _hcd->core_if->host_if->host_global_regs;
+ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n");
+
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n",
+ tx_status.b.ptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n",
+ tx_status.b.ptxfspcavail);
+
+#endif /* */
+ qh_ptr = _hcd->periodic_sched_assigned.next;
+ while (qh_ptr != &_hcd->periodic_sched_assigned) {
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ if (tx_status.b.ptxqspcavail == 0) {
+ no_queue_space = 1;
+ break;
+ }
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+
+ /*
+ * Set a flag if we're queuing high-bandwidth in slave mode.
+ * The flag prevents any halts to get into the request queue in
+ * the middle of multiple high-bandwidth packets getting queued.
+ */
+ if ((!_hcd->core_if->dma_enable) &&
+ (qh->channel->multi_count > 1)) {
+ _hcd->core_if->queuing_high_bandwidth = 1;
+ }
+ status = queue_transaction(_hcd, qh->channel,tx_status.b.ptxfspcavail);
+ if (status < 0) {
+ no_fifo_space = 1;
+ break;
+ }
+
+ /*
+ * In Slave mode, stay on the current transfer until there is
+ * nothing more to do or the high-bandwidth request count is
+ * reached. In DMA mode, only need to queue one request. The
+ * controller automatically handles multiple packets for
+ * high-bandwidth transfers.
+ */
+ if (_hcd->core_if->dma_enable ||
+ (status == 0 || qh->channel->requests == qh->channel->multi_count)) {
+ qh_ptr = qh_ptr->next;
+
+ /*
+ * Move the QH from the periodic assigned schedule to
+ * the periodic queued schedule.
+ */
+ list_move(&qh->qh_list_entry,
+ &_hcd->periodic_sched_queued);
+
+ /* done queuing high bandwidth */
+ _hcd->core_if->queuing_high_bandwidth = 0;
+ }
+ }
+ if (!_hcd->core_if->dma_enable) {
+ dwc_otg_core_global_regs_t * global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ global_regs = _hcd->core_if->core_global_regs;
+ intr_mask.b.ptxfempty = 1;
+
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ DWC_DEBUGPL(DBG_HCDV," P Tx Req Queue Space Avail (after queue): %d\n",
+ tx_status.b.ptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV," P Tx FIFO Space Avail (after queue): %d\n",
+ tx_status.b.ptxfspcavail);
+
+#endif /* */
+ if (!(list_empty(&_hcd->periodic_sched_assigned))
+ || no_queue_space || no_fifo_space) {
+
+ /*
+ * May need to queue more transactions as the request
+ * queue or Tx FIFO empties. Enable the periodic Tx
+ * FIFO empty interrupt. (Always use the half-empty
+ * level to ensure that new requests are loaded as
+ * soon as possible.)
+ */
+ dwc_modify_reg32(&global_regs->gintmsk, 0,intr_mask.d32);
+ } else {
+ /*
+ * Disable the Tx FIFO empty interrupt since there are
+ * no more transactions that need to be queued right
+ * now. This function is called from interrupt
+ * handlers to queue more transactions as transfer
+ * states change.
+ */
+ dwc_modify_reg32(&global_regs->gintmsk,intr_mask.d32, 0);
+ }
+ }
+}
+
+
+
+/**
+ * This function processes the currently active host channels and queues
+ * transactions for these channels to the DWC_otg controller. It is called
+ * from HCD interrupt handler functions.
+ *
+ * Dispatches to process_periodic_channels() and/or
+ * process_non_periodic_channels() depending on @_tr_type, and masks the
+ * NP Tx FIFO empty interrupt when there is no non-periodic work left.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _tr_type The type(s) of transactions to queue (non-periodic,
+ * periodic, or both).
+ */
+void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t * _hcd,
+ dwc_otg_transaction_type_e _tr_type)
+{
+
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n");
+
+#endif /* */
+ /* Process host channels associated with periodic transfers. */
+ if ((_tr_type == DWC_OTG_TRANSACTION_PERIODIC
+ || _tr_type == DWC_OTG_TRANSACTION_ALL)
+ && !list_empty(&_hcd->periodic_sched_assigned)) {
+ process_periodic_channels(_hcd);
+ }
+
+ /* Process host channels associated with non-periodic transfers. */
+ if ((_tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC
+ || _tr_type == DWC_OTG_TRANSACTION_ALL)) {
+ if (!list_empty(&_hcd->non_periodic_sched_active)) {
+ process_non_periodic_channels(_hcd);
+ } else {
+ /*
+ * Ensure NP Tx FIFO empty interrupt is disabled when
+ * there are no non-periodic transfers to process.
+ */
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ gintmsk.b.nptxfempty = 1;
+ dwc_modify_reg32(&_hcd->core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
+ }
+ }
+}
+
+/**
+ * Sets the final status of an URB and returns it to the device driver. Any
+ * required cleanup of the URB is performed.
+ *
+ * Must be called with _hcd->lock held; the lock is dropped around
+ * usb_hcd_giveback_urb() (which may re-enter the HCD via the URB's
+ * completion handler) and re-acquired before returning, as the
+ * __releases/__acquires annotations record.
+ */
+void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t * _hcd, struct urb *_urb,
+ int _status)
+__releases(_hcd->lock)
+__acquires(_hcd->lock)
+{
+
+#ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
+ __func__, _urb, usb_pipedevice(_urb->pipe),
+ usb_pipeendpoint(_urb->pipe),
+ usb_pipein(_urb->pipe) ? "IN" : "OUT", _status);
+ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+ for (i = 0; i < _urb->number_of_packets; i++) {
+ DWC_PRINT(" ISO Desc %d status: %d\n", i,
+ _urb->iso_frame_desc[i].status);
+ }
+ }
+ }
+
+#endif /* */
+ _urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(_hcd), _urb);
+ spin_unlock(&_hcd->lock);
+ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(_hcd), _urb, _status);
+ spin_lock(&_hcd->lock);
+}
+
+
+/*
+ * Returns the Queue Head for an URB.
+ *
+ * Reads the QH pointer stashed in the endpoint's hcpriv field.
+ * NOTE(review): presumably hcpriv was set at enqueue time; callers
+ * should be prepared for NULL before the first submission — confirm
+ * against the enqueue path.
+ */
+dwc_otg_qh_t * dwc_urb_to_qh(struct urb *_urb)
+{
+ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(_urb);
+ return (dwc_otg_qh_t *) ep->hcpriv;
+}
+
+
+#ifdef DEBUG
+/**
+ * Debug helper: decodes and prints an 8-byte USB SETUP packet
+ * (bmRequestType, bRequest, wValue, wIndex, wLength) when the DBG_HCD
+ * debug level is enabled.
+ */
+void dwc_print_setup_data(uint8_t * setup)
+{
+ int i;
+ if (CHK_DEBUG_LEVEL(DBG_HCD)) {
+ DWC_PRINT("Setup Data = MSB ");
+ for (i = 7; i >= 0; i--)
+ DWC_PRINT("%02x ", setup[i]);
+ DWC_PRINT("\n");
+ DWC_PRINT(" bmRequestType Tranfer = %s\n",
+ (setup[0] & 0x80) ? "Device-to-Host" :
+ "Host-to-Device");
+ DWC_PRINT(" bmRequestType Type = ");
+ switch ((setup[0] & 0x60) >> 5) {
+ case 0:
+ DWC_PRINT("Standard\n");
+ break;
+ case 1:
+ DWC_PRINT("Class\n");
+ break;
+ case 2:
+ DWC_PRINT("Vendor\n");
+ break;
+ case 3:
+ DWC_PRINT("Reserved\n");
+ break;
+ }
+ DWC_PRINT(" bmRequestType Recipient = ");
+ switch (setup[0] & 0x1f) {
+ case 0:
+ DWC_PRINT("Device\n");
+ break;
+ case 1:
+ DWC_PRINT("Interface\n");
+ break;
+ case 2:
+ DWC_PRINT("Endpoint\n");
+ break;
+ case 3:
+ DWC_PRINT("Other\n");
+ break;
+ default:
+ DWC_PRINT("Reserved\n");
+ break;
+ }
+ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]);
+ /* NOTE(review): these reinterpret byte pairs as native-endian
+ * uint16_t — values print byte-swapped relative to the USB
+ * little-endian wire format on big-endian PPC, and the access
+ * may be unaligned; debug-only, but worth confirming. */
+ DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *) & setup[2]));
+ DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *) & setup[4]));
+ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *) & setup[6]));
+ }
+}
+
+
+#endif /* */
+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t * _hcd)
+{
+
+/*
+#ifdef DEBUG
+ DWC_PRINT("Frame remaining at SOF:\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->frrem_samples, _hcd->frrem_accum,
+ (_hcd->frrem_samples > 0) ?
+ _hcd->frrem_accum/_hcd->frrem_samples : 0);
+
+ DWC_PRINT("\n");
+ DWC_PRINT("Frame remaining at start_transfer (uframe 7):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->core_if->hfnum_7_samples, _hcd->core_if->hfnum_7_frrem_accum,
+ (_hcd->core_if->hfnum_7_samples > 0) ?
+ _hcd->core_if->hfnum_7_frrem_accum/_hcd->core_if->hfnum_7_samples : 0);
+ DWC_PRINT("Frame remaining at start_transfer (uframe 0):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->core_if->hfnum_0_samples, _hcd->core_if->hfnum_0_frrem_accum,
+ (_hcd->core_if->hfnum_0_samples > 0) ?
+ _hcd->core_if->hfnum_0_frrem_accum/_hcd->core_if->hfnum_0_samples : 0);
+ DWC_PRINT("Frame remaining at start_transfer (uframe 1-6):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->core_if->hfnum_other_samples, _hcd->core_if->hfnum_other_frrem_accum,
+ (_hcd->core_if->hfnum_other_samples > 0) ?
+ _hcd->core_if->hfnum_other_frrem_accum/_hcd->core_if->hfnum_other_samples : 0);
+
+ DWC_PRINT("\n");
+ DWC_PRINT("Frame remaining at sample point A (uframe 7):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_7_samples_a, _hcd->hfnum_7_frrem_accum_a,
+ (_hcd->hfnum_7_samples_a > 0) ?
+ _hcd->hfnum_7_frrem_accum_a/_hcd->hfnum_7_samples_a : 0);
+ DWC_PRINT("Frame remaining at sample point A (uframe 0):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_0_samples_a, _hcd->hfnum_0_frrem_accum_a,
+ (_hcd->hfnum_0_samples_a > 0) ?
+ _hcd->hfnum_0_frrem_accum_a/_hcd->hfnum_0_samples_a : 0);
+ DWC_PRINT("Frame remaining at sample point A (uframe 1-6):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_other_samples_a, _hcd->hfnum_other_frrem_accum_a,
+ (_hcd->hfnum_other_samples_a > 0) ?
+ _hcd->hfnum_other_frrem_accum_a/_hcd->hfnum_other_samples_a : 0);
+
+ DWC_PRINT("\n");
+ DWC_PRINT("Frame remaining at sample point B (uframe 7):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_7_samples_b, _hcd->hfnum_7_frrem_accum_b,
+ (_hcd->hfnum_7_samples_b > 0) ?
+ _hcd->hfnum_7_frrem_accum_b/_hcd->hfnum_7_samples_b : 0);
+ DWC_PRINT("Frame remaining at sample point B (uframe 0):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_0_samples_b, _hcd->hfnum_0_frrem_accum_b,
+ (_hcd->hfnum_0_samples_b > 0) ?
+ _hcd->hfnum_0_frrem_accum_b/_hcd->hfnum_0_samples_b : 0);
+ DWC_PRINT("Frame remaining at sample point B (uframe 1-6):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_other_samples_b, _hcd->hfnum_other_frrem_accum_b,
+ (_hcd->hfnum_other_samples_b > 0) ?
+ _hcd->hfnum_other_frrem_accum_b/_hcd->hfnum_other_samples_b : 0);
+#endif
+*/
+} void dwc_otg_hcd_dump_state(dwc_otg_hcd_t * _hcd)
+{
+
+#ifdef DEBUG
+ int num_channels;
+ int i;
+ gnptxsts_data_t np_tx_status;
+ hptxsts_data_t p_tx_status;
+ num_channels = _hcd->core_if->core_params->host_channels;
+ DWC_PRINT("\n");
+ DWC_PRINT
+ ("************************************************************\n");
+ DWC_PRINT("HCD State:\n");
+ DWC_PRINT(" Num channels: %d\n", num_channels);
+ for (i = 0; i < num_channels; i++) {
+ dwc_hc_t * hc = _hcd->hc_ptr_array[i];
+ DWC_PRINT(" Channel %d:\n", i);
+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+ hc->dev_addr, hc->ep_num, hc->ep_is_in);
+ DWC_PRINT(" speed: %d\n", hc->speed);
+ DWC_PRINT(" ep_type: %d\n", hc->ep_type);
+ DWC_PRINT(" max_packet: %d\n", hc->max_packet);
+ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
+ DWC_PRINT(" multi_count: %d\n", hc->multi_count);
+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
+ DWC_PRINT(" xfer_count: %d\n", hc->xfer_count);
+ DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue);
+ DWC_PRINT(" halt_pending: %d\n", hc->halt_pending);
+ DWC_PRINT(" halt_status: %d\n", hc->halt_status);
+ DWC_PRINT(" do_split: %d\n", hc->do_split);
+ DWC_PRINT(" complete_split: %d\n", hc->complete_split);
+ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr);
+ DWC_PRINT(" port_addr: %d\n", hc->port_addr);
+ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos);
+ DWC_PRINT(" requests: %d\n", hc->requests);
+ DWC_PRINT(" qh: %p\n", hc->qh);
+ if (hc->xfer_started) {
+ hfnum_data_t hfnum;
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ hfnum.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->
+ host_global_regs->hfnum);
+ hcchar.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->
+ hcchar);
+ hctsiz.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->
+ hctsiz);
+ hcint.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->
+ hcint);
+ hcintmsk.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->
+ hcintmsk);
+ DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32);
+ DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32);
+ DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32);
+ DWC_PRINT(" hcint: 0x%08x\n", hcint.d32);
+ DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32);
+ }
+ if (hc->xfer_started && (hc->qh != NULL)
+ && (hc->qh->qtd_in_process != NULL)) {
+ dwc_otg_qtd_t * qtd;
+ struct urb *urb;
+ qtd = hc->qh->qtd_in_process;
+ urb = qtd->urb;
+ DWC_PRINT(" URB Info:\n");
+ DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb);
+ if (urb != NULL) {
+ DWC_PRINT(" Dev: %d, EP: %d %s\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->
+ pipe) ? "IN" : "OUT");
+ DWC_PRINT(" Max packet size: %d\n",
+ usb_maxpacket(urb->dev, urb->pipe,
+ usb_pipeout(urb->
+ pipe)));
+ DWC_PRINT(" transfer_buffer: %p\n",
+ urb->transfer_buffer);
+ DWC_PRINT(" transfer_dma: %p\n",
+ (void *)urb->transfer_dma);
+ DWC_PRINT(" transfer_buffer_length: %d\n",
+ urb->transfer_buffer_length);
+ DWC_PRINT(" actual_length: %d\n",
+ urb->actual_length);
+ }
+ }
+ } DWC_PRINT(" non_periodic_channels: %d\n",
+ _hcd->non_periodic_channels);
+ DWC_PRINT(" periodic_channels: %d\n", _hcd->periodic_channels);
+ DWC_PRINT(" periodic_usecs: %d\n", _hcd->periodic_usecs);
+ np_tx_status.d32 =
+ dwc_read_reg32(&_hcd->core_if->core_global_regs->gnptxsts);
+ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n",
+ np_tx_status.b.nptxqspcavail);
+ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n",
+ np_tx_status.b.nptxfspcavail);
+ p_tx_status.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hptxsts);
+ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n",
+ p_tx_status.b.ptxqspcavail);
+ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
+ dwc_otg_hcd_dump_frrem(_hcd);
+ dwc_otg_dump_global_registers(_hcd->core_if);
+ dwc_otg_dump_host_registers(_hcd->core_if);
+ DWC_PRINT
+ ("************************************************************\n");
+ DWC_PRINT("\n");
+
+#endif /* */
+}
+#endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h
index ae2b8c3e179..9ac7598b3ed 100644
--- a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h
@@ -36,7 +36,11 @@
#include <linux/list.h>
#include <linux/usb.h>
+#if 0
+#include <linux/usb/hcd.h>
+#else
#include <../drivers/usb/core/hcd.h>
+#endif
struct lm_device;
struct dwc_otg_device;
@@ -205,6 +209,9 @@ typedef struct dwc_otg_qh {
/** @} */
+ uint16_t speed;
+ uint16_t frame_usecs[8];
+
/** Entry for QH in either the periodic or non-periodic schedule. */
struct list_head qh_list_entry;
} dwc_otg_qh_t;
@@ -314,6 +321,18 @@ typedef struct dwc_otg_hcd {
*/
uint16_t periodic_usecs;
+ /**
+ * Total bandwidth claimed so far for all periodic transfers
+ * in a frame.
+ * This will include a mixture of HS and FS transfers.
+ * Units are microseconds per (micro)frame.
+ * We have a budget per frame and have to schedule
+ * transactions accordingly.
+ * Watch out for the fact that things are actually scheduled for the
+ * "next frame".
+ */
+ uint16_t frame_usecs[8];
+
/**
* Frame number read from the core at SOF. The value ranges from 0 to
* DWC_HFNUM_MAX_FRNUM.
@@ -327,17 +346,9 @@ typedef struct dwc_otg_hcd {
struct list_head free_hc_list;
/**
- * Number of host channels assigned to periodic transfers. Currently
- * assuming that there is a dedicated host channel for each periodic
- * transaction and at least one host channel available for
- * non-periodic transactions.
- */
- int periodic_channels;
-
- /**
- * Number of host channels assigned to non-periodic transfers.
+ * Number of available host channels.
*/
- int non_periodic_channels;
+ int available_host_channels;
/**
* Array of pointers to the host channel descriptors. Allows accessing
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h.org b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h.org
new file mode 100644
index 00000000000..ae2b8c3e179
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h.org
@@ -0,0 +1,660 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd.h $
+ * $Revision: #6 $
+ * $Date: 2006/12/05 $
+ * $Change: 762293 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef CONFIG_DWC_DEVICE_ONLY
+#if !defined(__DWC_HCD_H__)
+#define __DWC_HCD_H__
+
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <../drivers/usb/core/hcd.h>
+
+struct lm_device;
+struct dwc_otg_device;
+
+#include "dwc_otg_cil.h"
+
+/**
+ * @file
+ *
+ * This file contains the structures, constants, and interfaces for
+ * the Host Controller Driver (HCD).
+ *
+ * The Host Controller Driver (HCD) is responsible for translating requests
+ * from the USB Driver into the appropriate actions on the DWC_otg controller.
+ * It isolates the USBD from the specifics of the controller by providing an
+ * API to the USBD.
+ */
+
+/**
+ * Phases for control transfers.
+ */
+typedef enum dwc_otg_control_phase {
+ DWC_OTG_CONTROL_SETUP,
+ DWC_OTG_CONTROL_DATA,
+ DWC_OTG_CONTROL_STATUS
+} dwc_otg_control_phase_e;
+
+/** Transaction types. */
+typedef enum dwc_otg_transaction_type {
+ DWC_OTG_TRANSACTION_NONE,
+ DWC_OTG_TRANSACTION_PERIODIC,
+ DWC_OTG_TRANSACTION_NON_PERIODIC,
+ DWC_OTG_TRANSACTION_ALL
+} dwc_otg_transaction_type_e;
+
+/**
+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
+ * interrupt, or isochronous transfer. A single QTD is created for each URB
+ * (of one of these types) submitted to the HCD. The transfer associated with
+ * a QTD may require one or multiple transactions.
+ *
+ * A QTD is linked to a Queue Head, which is entered in either the
+ * non-periodic or periodic schedule for execution. When a QTD is chosen for
+ * execution, some or all of its transactions may be executed. After
+ * execution, the state of the QTD is updated. The QTD may be retired if all
+ * its transactions are complete or if an error occurred. Otherwise, it
+ * remains in the schedule so more transactions can be executed later.
+ */
+
+struct dwc_otg_qh;
+
+typedef struct dwc_otg_qtd {
+ /**
+ * Determines the PID of the next data packet for the data phase of
+ * control transfers. Ignored for other transfer types.<br>
+ * One of the following values:
+ * - DWC_OTG_HC_PID_DATA0
+ * - DWC_OTG_HC_PID_DATA1
+ */
+ uint8_t data_toggle;
+
+ /** Current phase for control transfers (Setup, Data, or Status). */
+ dwc_otg_control_phase_e control_phase;
+
+ /** Keep track of the current split type
+ * for FS/LS endpoints on a HS Hub */
+ uint8_t complete_split;
+
+ /** How many bytes transferred during SSPLIT OUT */
+ uint32_t ssplit_out_xfer_count;
+
+ /**
+ * Holds the number of bus errors that have occurred for a transaction
+ * within this transfer.
+ */
+ uint8_t error_count;
+
+ /**
+ * Index of the next frame descriptor for an isochronous transfer. A
+ * frame descriptor describes the buffer position and length of the
+ * data to be transferred in the next scheduled (micro)frame of an
+ * isochronous transfer. It also holds status for that transaction.
+ * The frame index starts at 0.
+ */
+ int isoc_frame_index;
+
+ /** Position of the ISOC split on full/low speed */
+ uint8_t isoc_split_pos;
+
+ /** Position of the ISOC split in the buffer for the current frame */
+ uint16_t isoc_split_offset;
+
+ /** URB for this transfer */
+ struct urb *urb;
+
+ /** This list of QTDs */
+ struct list_head qtd_list_entry;
+
+ /* Field to track the qh pointer */
+ struct dwc_otg_qh *qtd_qh_ptr;
+} dwc_otg_qtd_t;
+
+/**
+ * A Queue Head (QH) holds the static characteristics of an endpoint and
+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
+ * be entered in either the non-periodic or periodic schedule.
+ */
+typedef struct dwc_otg_qh {
+ /**
+ * Endpoint type.
+ * One of the following values:
+ * - USB_ENDPOINT_XFER_CONTROL
+ * - USB_ENDPOINT_XFER_ISOC
+ * - USB_ENDPOINT_XFER_BULK
+ * - USB_ENDPOINT_XFER_INT
+ */
+ uint8_t ep_type;
+ uint8_t ep_is_in;
+
+ /** wMaxPacketSize Field of Endpoint Descriptor. */
+ uint16_t maxp;
+
+ /**
+ * Determines the PID of the next data packet for non-control
+ * transfers. Ignored for control transfers.<br>
+ * One of the following values:
+ * - DWC_OTG_HC_PID_DATA0
+ * - DWC_OTG_HC_PID_DATA1
+ */
+ uint8_t data_toggle;
+
+ /** Ping state if 1. */
+ uint8_t ping_state;
+
+ /**
+ * List of QTDs for this QH.
+ */
+ struct list_head qtd_list;
+
+ /** Host channel currently processing transfers for this QH. */
+ dwc_hc_t *channel;
+
+ /** QTD currently assigned to a host channel for this QH. */
+ dwc_otg_qtd_t *qtd_in_process;
+
+ /** Full/low speed endpoint on high-speed hub requires split. */
+ uint8_t do_split;
+
+ /** @name Periodic schedule information */
+ /** @{ */
+
+ /** Bandwidth in microseconds per (micro)frame. */
+ uint8_t usecs;
+
+ /** Interval between transfers in (micro)frames. */
+ uint16_t interval;
+
+ /**
+ * (micro)frame to initialize a periodic transfer. The transfer
+ * executes in the following (micro)frame.
+ */
+ uint16_t sched_frame;
+
+ /** (micro)frame at which last start split was initialized. */
+ uint16_t start_split_frame;
+
+ /** @} */
+
+ /** Entry for QH in either the periodic or non-periodic schedule. */
+ struct list_head qh_list_entry;
+} dwc_otg_qh_t;
+
+/**
+ * This structure holds the state of the HCD, including the non-periodic and
+ * periodic schedules.
+ */
+typedef struct dwc_otg_hcd {
+
+ spinlock_t lock;
+
+ /** DWC OTG Core Interface Layer */
+ dwc_otg_core_if_t *core_if;
+
+ /** Internal DWC HCD Flags */
+ volatile union dwc_otg_hcd_internal_flags {
+ uint32_t d32;
+ struct {
+ unsigned port_connect_status_change : 1;
+ unsigned port_connect_status : 1;
+ unsigned port_reset_change : 1;
+ unsigned port_enable_change : 1;
+ unsigned port_suspend_change : 1;
+ unsigned port_over_current_change : 1;
+ unsigned reserved : 27;
+ } b;
+ } flags;
+
+ /**
+ * Inactive items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are not
+ * currently assigned to a host channel.
+ */
+ struct list_head non_periodic_sched_inactive;
+
+ /**
+ * Deferred items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are not
+ * currently assigned to a host channel.
+ * When we get a NAK, the QH goes here.
+ */
+ struct list_head non_periodic_sched_deferred;
+
+ /**
+ * Active items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are
+ * currently assigned to a host channel.
+ */
+ struct list_head non_periodic_sched_active;
+
+ /**
+ * Pointer to the next Queue Head to process in the active
+ * non-periodic schedule.
+ */
+ struct list_head *non_periodic_qh_ptr;
+
+ /**
+ * Inactive items in the periodic schedule. This is a list of QHs for
+ * periodic transfers that are _not_ scheduled for the next frame.
+ * Each QH in the list has an interval counter that determines when it
+ * needs to be scheduled for execution. This scheduling mechanism
+ * allows only a simple calculation for periodic bandwidth used (i.e.
+ * must assume that all periodic transfers may need to execute in the
+ * same frame). However, it greatly simplifies scheduling and should
+ * be sufficient for the vast majority of OTG hosts, which need to
+ * connect to a small number of peripherals at one time.
+ *
+ * Items move from this list to periodic_sched_ready when the QH
+ * interval counter is 0 at SOF.
+ */
+ struct list_head periodic_sched_inactive;
+
+ /**
+ * List of periodic QHs that are ready for execution in the next
+ * frame, but have not yet been assigned to host channels.
+ *
+ * Items move from this list to periodic_sched_assigned as host
+ * channels become available during the current frame.
+ */
+ struct list_head periodic_sched_ready;
+
+ /**
+ * List of periodic QHs to be executed in the next frame that are
+ * assigned to host channels.
+ *
+ * Items move from this list to periodic_sched_queued as the
+ * transactions for the QH are queued to the DWC_otg controller.
+ */
+ struct list_head periodic_sched_assigned;
+
+ /**
+ * List of periodic QHs that have been queued for execution.
+ *
+ * Items move from this list to either periodic_sched_inactive or
+ * periodic_sched_ready when the channel associated with the transfer
+ * is released. If the interval for the QH is 1, the item moves to
+ * periodic_sched_ready because it must be rescheduled for the next
+ * frame. Otherwise, the item moves to periodic_sched_inactive.
+ */
+ struct list_head periodic_sched_queued;
+
+ /**
+ * Total bandwidth claimed so far for periodic transfers. This value
+ * is in microseconds per (micro)frame. The assumption is that all
+ * periodic transfers may occur in the same (micro)frame.
+ */
+ uint16_t periodic_usecs;
+
+ /**
+ * Frame number read from the core at SOF. The value ranges from 0 to
+ * DWC_HFNUM_MAX_FRNUM.
+ */
+ uint16_t frame_number;
+
+ /**
+ * Free host channels in the controller. This is a list of
+ * dwc_hc_t items.
+ */
+ struct list_head free_hc_list;
+
+ /**
+ * Number of host channels assigned to periodic transfers. Currently
+ * assuming that there is a dedicated host channel for each periodic
+ * transaction and at least one host channel available for
+ * non-periodic transactions.
+ */
+ int periodic_channels;
+
+ /**
+ * Number of host channels assigned to non-periodic transfers.
+ */
+ int non_periodic_channels;
+
+ /**
+ * Array of pointers to the host channel descriptors. Allows accessing
+ * a host channel descriptor given the host channel number. This is
+ * useful in interrupt handlers.
+ */
+ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNELS];
+
+ /**
+ * Buffer to use for any data received during the status phase of a
+ * control transfer. Normally no data is transferred during the status
+ * phase. This buffer is used as a bit bucket.
+ */
+ uint8_t *status_buf;
+
+ /**
+ * DMA address for status_buf.
+ */
+ dma_addr_t status_buf_dma;
+#define DWC_OTG_HCD_STATUS_BUF_SIZE 64
+
+ /**
+ * Structure to allow starting the HCD in a non-interrupt context
+ * during an OTG role change.
+ */
+ struct work_struct start_work;
+ struct usb_hcd *_p;
+
+ /**
+ * Connection timer. An OTG host must display a message if the device
+ * does not connect. Started when the VBus power is turned on via
+ * sysfs attribute "buspower".
+ */
+ struct timer_list conn_timer;
+
+ /* Tasklet to do a reset */
+ struct tasklet_struct *reset_tasklet;
+
+#ifdef CONFIG_DWC_DEBUG
+ uint32_t frrem_samples;
+ uint64_t frrem_accum;
+
+ uint32_t hfnum_7_samples_a;
+ uint64_t hfnum_7_frrem_accum_a;
+ uint32_t hfnum_0_samples_a;
+ uint64_t hfnum_0_frrem_accum_a;
+ uint32_t hfnum_other_samples_a;
+ uint64_t hfnum_other_frrem_accum_a;
+
+ uint32_t hfnum_7_samples_b;
+ uint64_t hfnum_7_frrem_accum_b;
+ uint32_t hfnum_0_samples_b;
+ uint64_t hfnum_0_frrem_accum_b;
+ uint32_t hfnum_other_samples_b;
+ uint64_t hfnum_other_frrem_accum_b;
+#endif
+
+} dwc_otg_hcd_t;
+
+/** Gets the dwc_otg_hcd from a struct usb_hcd */
+static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd)
+{
+ return (dwc_otg_hcd_t *)(hcd->hcd_priv);
+}
+
+/** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. */
+static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_t *dwc_otg_hcd)
+{
+ return container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv);
+}
+
+/** @name HCD Create/Destroy Functions */
+/** @{ */
+extern int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_device);
+extern void dwc_otg_hcd_remove(struct device *_dev);
+/** @} */
+
+/** @name Linux HC Driver API Functions */
+/** @{ */
+
+extern int dwc_otg_hcd_start(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_stop(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_free(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags);
+extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
+/* struct usb_host_endpoint *ep,*/
+ struct urb *urb, int status);
+extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep);
+extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd,
+ char *buf);
+extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength);
+
+/** @} */
+
+/** @name Transaction Execution Functions */
+/** @{ */
+extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd);
+extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *_hcd,
+ dwc_otg_transaction_type_e _tr_type);
+extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *_urb,
+ int _status);
+/** @} */
+
+/** @name Interrupt Handler Functions */
+/** @{ */
+extern int32_t dwc_otg_hcd_handle_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_sof_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_port_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_disconnect_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_hc_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_hc_n_intr (dwc_otg_hcd_t *_dwc_otg_hcd, uint32_t _num);
+extern int32_t dwc_otg_hcd_handle_session_req_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+/** @} */
+
+
+/** @name Schedule Queue Functions */
+/** @{ */
+
+/* Implemented in dwc_otg_hcd_queue.c */
+extern dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *_hcd, struct urb *_urb);
+extern void dwc_otg_hcd_qh_init (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, struct urb *_urb);
+extern void dwc_otg_hcd_qh_free (dwc_otg_qh_t *_qh);
+extern int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh);
+extern void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh);
+extern void dwc_otg_hcd_qh_deactivate (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int sched_csplit);
+extern int dwc_otg_hcd_qh_deferr (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int delay);
+
+/** Remove and free a QH */
+static inline void dwc_otg_hcd_qh_remove_and_free (dwc_otg_hcd_t *_hcd,
+ dwc_otg_qh_t *_qh)
+{
+ dwc_otg_hcd_qh_remove (_hcd, _qh);
+ dwc_otg_hcd_qh_free (_qh);
+}
+
+/** Allocates memory for a QH structure.
+ * @return Returns the allocated memory or NULL on error. */
+static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc (void)
+{
+ return (dwc_otg_qh_t *) kmalloc (sizeof(dwc_otg_qh_t), GFP_KERNEL);
+}
+
+extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb);
+extern void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb);
+extern int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd);
+
+/** Allocates memory for a QTD structure.
+ * @return Returns the allocated memory or NULL on error. */
+static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc (void)
+{
+ return (dwc_otg_qtd_t *) kmalloc (sizeof(dwc_otg_qtd_t), GFP_KERNEL);
+}
+
+/** Frees the memory for a QTD structure. QTD should already be removed from
+ * list.
+ * @param[in] _qtd QTD to free.*/
+static inline void dwc_otg_hcd_qtd_free (dwc_otg_qtd_t *_qtd)
+{
+ kfree (_qtd);
+}
+
+/** Removes a QTD from list.
+ * @param[in] _qtd QTD to remove from list. */
+static inline void dwc_otg_hcd_qtd_remove (dwc_otg_qtd_t *_qtd)
+{
+ unsigned long flags;
+ local_irq_save (flags);
+ list_del (&_qtd->qtd_list_entry);
+ local_irq_restore (flags);
+}
+
+/** Remove and free a QTD */
+static inline void dwc_otg_hcd_qtd_remove_and_free (dwc_otg_qtd_t *_qtd)
+{
+ dwc_otg_hcd_qtd_remove (_qtd);
+ dwc_otg_hcd_qtd_free (_qtd);
+}
+
+/** @} */
+
+
+/** @name Internal Functions */
+/** @{ */
+dwc_otg_qh_t *dwc_urb_to_qh(struct urb *_urb);
+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *_hcd);
+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *_hcd);
+/** @} */
+
+/** Gets the usb_host_endpoint associated with an URB. */
+static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *_urb)
+{
+ struct usb_device *dev = _urb->dev;
+ int ep_num = usb_pipeendpoint(_urb->pipe);
+
+ if (usb_pipein(_urb->pipe))
+ return dev->ep_in[ep_num];
+ else
+ return dev->ep_out[ep_num];
+}
+
+/**
+ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is
+ * qualified with its direction (possible 32 endpoints per device).
+ */
+#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) \
+ ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \
+ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4)
+
+/** Gets the QH that contains the list_head */
+#define dwc_list_to_qh(_list_head_ptr_) (container_of(_list_head_ptr_,dwc_otg_qh_t,qh_list_entry))
+
+/** Gets the QTD that contains the list_head */
+#define dwc_list_to_qtd(_list_head_ptr_) (container_of(_list_head_ptr_,dwc_otg_qtd_t,qtd_list_entry))
+
+/** Check if QH is non-periodic */
+#define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == USB_ENDPOINT_XFER_BULK) || \
+ (_qh_ptr_->ep_type == USB_ENDPOINT_XFER_CONTROL))
+
+/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */
+#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+
+/** Packet size for any kind of endpoint descriptor */
+#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+
+/**
+ * Returns true if _frame1 is less than or equal to _frame2. The comparison is
+ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the
+ * frame number when the max frame number is reached.
+ */
+static inline int dwc_frame_num_le(uint16_t _frame1, uint16_t _frame2)
+{
+ return ((_frame2 - _frame1) & DWC_HFNUM_MAX_FRNUM) <=
+ (DWC_HFNUM_MAX_FRNUM >> 1);
+}
+
+/**
+ * Returns true if _frame1 is greater than _frame2. The comparison is done
+ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
+ * number when the max frame number is reached.
+ */
+static inline int dwc_frame_num_gt(uint16_t _frame1, uint16_t _frame2)
+{
+ return (_frame1 != _frame2) &&
+ (((_frame1 - _frame2) & DWC_HFNUM_MAX_FRNUM) <
+ (DWC_HFNUM_MAX_FRNUM >> 1));
+}
+
+/**
+ * Increments _frame by the amount specified by _inc. The addition is done
+ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value.
+ */
+static inline uint16_t dwc_frame_num_inc(uint16_t _frame, uint16_t _inc)
+{
+ return (_frame + _inc) & DWC_HFNUM_MAX_FRNUM;
+}
+
+static inline uint16_t dwc_full_frame_num (uint16_t _frame)
+{
+ return ((_frame) & DWC_HFNUM_MAX_FRNUM) >> 3;
+}
+
+static inline uint16_t dwc_micro_frame_num (uint16_t _frame)
+{
+ return (_frame) & 0x7;
+}
+
+#ifdef CONFIG_DWC_DEBUG
+/**
+ * Macro to sample the remaining PHY clocks left in the current frame. This
+ * may be used during debugging to determine the average time it takes to
+ * execute sections of code. There are two possible sample points, "a" and
+ * "b", so the _letter argument must be one of these values.
+ *
+ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For
+ * example, "cat /sys/devices/lm0/hcd_frrem".
+ */
+#define dwc_sample_frrem(_hcd, _qh, _letter) \
+{ \
+ hfnum_data_t hfnum; \
+ dwc_otg_qtd_t *qtd; \
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); \
+ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \
+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); \
+ switch (hfnum.b.frnum & 0x7) { \
+ case 7: \
+ _hcd->hfnum_7_samples_##_letter++; \
+ _hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \
+ break; \
+ case 0: \
+ _hcd->hfnum_0_samples_##_letter++; \
+ _hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \
+ break; \
+ default: \
+ _hcd->hfnum_other_samples_##_letter++; \
+ _hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \
+ break; \
+ } \
+ } \
+}
+#else
+#define dwc_sample_frrem(_hcd, _qh, _letter)
+#endif
+#endif
+#endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c
index 39a995f9948..153c9a98c4a 100644
--- a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c
@@ -39,8 +39,7 @@
const int erratum_usb09_patched = 0;
const int deferral_on = 1;
-int nak_deferral_delay = 20;
-module_param(nak_deferral_delay, int, 0644);
+const int nak_deferral_delay = 8;
const int nyet_deferral_delay = 1;
/** @file
@@ -527,7 +526,9 @@ static int update_urb_state_xfer_comp(dwc_hc_t * _hc,
DWC_OTG_HC_XFER_COMPLETE, &short_read);
if (short_read || (_urb->actual_length == _urb->transfer_buffer_length)) {
xfer_done = 1;
- if (short_read && (_urb->transfer_flags & URB_SHORT_NOT_OK)) {
+ if (_urb->actual_length == _urb->transfer_buffer_length) {
+ *status = 0;
+ } else if (short_read && (_urb->transfer_flags & URB_SHORT_NOT_OK)) {
*status = -EREMOTEIO;
} else {
*status = 0;
@@ -701,6 +702,7 @@ static void release_channel(dwc_otg_hcd_t * _hcd,
dwc_otg_qh_t * _qh;
int deact = 1;
int retry_delay = 1;
+ unsigned long flags;
DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", __func__,
_hc->hc_num, _halt_status);
@@ -746,15 +748,22 @@ static void release_channel(dwc_otg_hcd_t * _hcd,
*/
goto cleanup;
case DWC_OTG_HC_XFER_NO_HALT_STATUS:
+#ifdef CONFIG_DWC_DEBUG
DWC_ERROR("%s: No halt_status, channel %d\n", __func__,
_hc->hc_num);
+#endif
free_qtd = 0;
break;
default:
free_qtd = 0;
break;
}
- *must_free = free_qtd;
+ if (free_qtd) {
+ /* Only change must_free to true (do not set to zero here -- it is
+ * pre-initialized to zero).
+ */
+ *must_free = 1;
+ }
if (deact) {
deactivate_qh(_hcd, _hc->qh, free_qtd);
}
@@ -766,19 +775,10 @@ cleanup:
*/
dwc_otg_hc_cleanup(_hcd->core_if, _hc);
list_add_tail(&_hc->hc_list_entry, &_hcd->free_hc_list);
- switch (_hc->ep_type) {
- case DWC_OTG_EP_TYPE_CONTROL:
- case DWC_OTG_EP_TYPE_BULK:
- _hcd->non_periodic_channels--;
- break;
- default:
- /*
- * Don't release reservations for periodic channels here.
- * That's done when a periodic transfer is descheduled (i.e.
- * when the QH is removed from the periodic schedule).
- */
- break;
- }
+ local_irq_save(flags);
+ _hcd->available_host_channels++;
+ local_irq_restore(flags);
+
/* Try to queue more transfers now that there's a free channel, */
/* unless erratum_usb09_patched is set */
if (!erratum_usb09_patched) {
@@ -1139,7 +1139,6 @@ static int32_t handle_hc_nak_intr(dwc_otg_hcd_t * _hcd,
break;
}
handle_nak_done:disable_hc_int(_hc_regs, nak);
- clear_hc_int(_hc_regs, nak);
return 1;
}
@@ -1210,6 +1209,8 @@ static int32_t handle_hc_ack_intr(dwc_otg_hcd_t * _hcd,
* automatically executes the PING, then the transfer.
*/
halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free);
+ } else {
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
}
}
@@ -1218,7 +1219,6 @@ static int32_t handle_hc_ack_intr(dwc_otg_hcd_t * _hcd,
* continue transferring data after clearing the error count.
*/
disable_hc_int(_hc_regs, ack);
- clear_hc_int(_hc_regs, ack);
return 1;
}
@@ -1489,7 +1489,7 @@ static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t * _hcd,
* @return 1 if halt status is ok, 0 otherwise.
*/
static inline int halt_status_ok(dwc_otg_hcd_t * _hcd,
- dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd)
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
{
hcchar_data_t hcchar;
hctsiz_data_t hctsiz;
@@ -1596,6 +1596,8 @@ static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t * _hcd,
handle_hc_frmovrun_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
} else if (hcint.b.datatglerr) {
handle_hc_datatglerr_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ _hc->qh->data_toggle = 0;
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
} else if (hcint.b.nak && !hcintmsk.b.nak) {
/*
* If nak is not masked, it's because a non-split IN transfer
@@ -1630,10 +1632,13 @@ static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t * _hcd,
halt_channel(_hcd, _hc, _qtd,
DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, must_free);
} else {
+#ifdef CONFIG_DWC_DEBUG
DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
"for halting is unknown, nyet %d, hcint 0x%08x, intsts 0x%08x\n",
__func__, _hc->hc_num, hcint.b.nyet, hcint.d32,
dwc_read_reg32(&_hcd->core_if->core_global_regs->gintsts));
+#endif
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
}
}
}
@@ -1658,13 +1663,12 @@ static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t * _hcd,
handle_hc_chhltd_intr_dma(_hcd, _hc, _hc_regs, _qtd, must_free);
} else {
#ifdef CONFIG_DWC_DEBUG
- if (!halt_status_ok(_hcd, _hc, _hc_regs, _qtd)) {
+ if (!halt_status_ok(_hcd, _hc, _hc_regs, _qtd, must_free)) {
return 1;
}
#endif /* */
release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
}
- clear_hc_int(_hc_regs, chhltd);
return 1;
}
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c.org b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c.org
new file mode 100644
index 00000000000..39a995f9948
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c.org
@@ -0,0 +1,1746 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_intr.c $
+ * $Revision: #7 $
+ * $Date: 2005/11/02 $
+ * $Change: 553126 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef CONFIG_DWC_DEVICE_ONLY
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+const int erratum_usb09_patched = 0;
+const int deferral_on = 1;
+int nak_deferral_delay = 20;
+module_param(nak_deferral_delay, int, 0644);
+const int nyet_deferral_delay = 1;
+
+/** @file
+ * This file contains the implementation of the HCD Interrupt handlers.
+ */
+
+/** This function handles interrupts for the HCD. */
+int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t * _dwc_otg_hcd)
+{
+ int retval = 0;
+ dwc_otg_core_if_t * core_if = _dwc_otg_hcd->core_if;
+ gintsts_data_t gintsts;
+
+#ifdef CONFIG_DWC_DEBUG
+ dwc_otg_core_global_regs_t * global_regs = core_if->core_global_regs;
+
+#endif /* */
+
+ /* Check if HOST Mode */
+ if (dwc_otg_is_host_mode(core_if)) {
+ gintsts.d32 = dwc_otg_read_core_intr(core_if);
+ if (!gintsts.d32) {
+ return 0;
+ }
+#ifdef CONFIG_DWC_DEBUG
+ /* Don't print debug message in the interrupt handler on SOF */
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+ DWC_DEBUGPL(DBG_HCD, "\n");
+#endif /* */
+
+#ifdef CONFIG_DWC_DEBUG
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+ DWC_DEBUGPL(DBG_HCD,"DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
+ gintsts.d32);
+#endif /* */
+ if (gintsts.b.sofintr) {
+ retval |= dwc_otg_hcd_handle_sof_intr(_dwc_otg_hcd);
+ }
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+ if (!atomic_read(&release_later) && gintsts.b.rxstsqlvl) {
+#else
+ if (gintsts.b.rxstsqlvl) {
+#endif
+ retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(_dwc_otg_hcd);
+ }
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+ if (!atomic_read(&release_later) && gintsts.b.nptxfempty) {
+#else
+ if (gintsts.b.nptxfempty) {
+#endif
+ retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(_dwc_otg_hcd);
+ }
+ if (gintsts.b.i2cintr) {
+ /** @todo Implement i2cintr handler. */
+ }
+ if (gintsts.b.portintr) {
+ retval |= dwc_otg_hcd_handle_port_intr(_dwc_otg_hcd);
+ }
+ if (gintsts.b.hcintr) {
+ retval |= dwc_otg_hcd_handle_hc_intr(_dwc_otg_hcd);
+ }
+ if (gintsts.b.ptxfempty) {
+ retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(_dwc_otg_hcd);
+ }
+
+#ifdef CONFIG_DWC_DEBUG
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+ {
+ DWC_DEBUGPL(DBG_HCD,
+ "DWC OTG HCD Finished Servicing Interrupts\n");
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
+ dwc_read_reg32(&global_regs->gintsts));
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
+ dwc_read_reg32(&global_regs->gintmsk));
+ }
+#endif /* */
+
+#ifdef CONFIG_DWC_DEBUG
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+ DWC_DEBUGPL(DBG_HCD, "\n");
+#endif /* */
+ }
+ return retval;
+}
+
+
+#ifdef DWC_TRACK_MISSED_SOFS
+#warning Compiling code to track missed SOFs
+#define FRAME_NUM_ARRAY_SIZE 1000
+/**
+ * This function is for debug only.
+ */
+static inline void track_missed_sofs(uint16_t _curr_frame_number)
+{
+ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
+ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
+ static int frame_num_idx = 0;
+ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM;
+ static int dumped_frame_num_array = 0;
+ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
+ if ((((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) !=
+ _curr_frame_number)) {
+ frame_num_array[frame_num_idx] = _curr_frame_number;
+ last_frame_num_array[frame_num_idx++] = last_frame_num;
+ }
+ } else if (!dumped_frame_num_array) {
+ int i;
+ printk(KERN_EMERG USB_DWC "Frame Last Frame\n");
+ printk(KERN_EMERG USB_DWC "----- ----------\n");
+ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
+ printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n",
+ frame_num_array[i], last_frame_num_array[i]);
+ }
+ dumped_frame_num_array = 1;
+ }
+ last_frame_num = _curr_frame_number;
+}
+#endif /* */
+
+/**
+ * Handles the start-of-frame interrupt in host mode. Non-periodic
+ * transactions may be queued to the DWC_otg controller for the current
+ * (micro)frame. Periodic transactions may be queued to the controller for the
+ * next (micro)frame.
+ */
+int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t * _hcd)
+{
+ hfnum_data_t hfnum;
+ struct list_head *qh_entry;
+ dwc_otg_qh_t * qh;
+ dwc_otg_transaction_type_e tr_type;
+ gintsts_data_t gintsts = {.d32 = 0};
+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum);
+
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
+#endif /* */
+ _hcd->frame_number = hfnum.b.frnum;
+#ifdef CONFIG_DWC_DEBUG
+ _hcd->frrem_accum += hfnum.b.frrem;
+ _hcd->frrem_samples++;
+#endif /* */
+
+#ifdef DWC_TRACK_MISSED_SOFS
+ track_missed_sofs(_hcd->frame_number);
+#endif /* */
+
+ /* Determine whether any periodic QHs should be executed. */
+ qh_entry = _hcd->periodic_sched_inactive.next;
+ while (qh_entry != &_hcd->periodic_sched_inactive) {
+ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry);
+ qh_entry = qh_entry->next;
+ if (dwc_frame_num_le(qh->sched_frame, _hcd->frame_number)) {
+ /*
+ * Move QH to the ready list to be executed next
+ * (micro)frame.
+ */
+ list_move(&qh->qh_list_entry,&_hcd->periodic_sched_ready);
+ }
+ }
+ tr_type = dwc_otg_hcd_select_transactions(_hcd);
+ if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+ dwc_otg_hcd_queue_transactions(_hcd, tr_type);
+ //schedule_work(&_hcd->hcd_queue_work);
+ }
+
+ /* Clear interrupt */
+ gintsts.b.sofintr = 1;
+ dwc_write_reg32(&_hcd->core_if->core_global_regs->gintsts,gintsts.d32);
+ return 1;
+}
+
+/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
+ * least one packet in the Rx FIFO. The packets are moved from the FIFO to
+ * memory if the DWC_otg controller is operating in Slave mode. */
+int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *_dwc_otg_hcd)
+{
+ host_grxsts_data_t grxsts;
+ dwc_hc_t * hc = NULL;
+ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
+ grxsts.d32 = dwc_read_reg32(&_dwc_otg_hcd->core_if->core_global_regs->grxstsp);
+ hc = _dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
+
+ /* Packet Status */
+ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum);
+ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt);
+ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid,
+ hc->data_pid_start);
+ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts);
+ switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN:
+ /* Read the data into the host buffer. */
+ if (grxsts.b.bcnt > 0) {
+ dwc_otg_read_packet(_dwc_otg_hcd->core_if,
+ hc->xfer_buff, grxsts.b.bcnt);
+
+ /* Update the HC fields for the next packet received. */
+ hc->xfer_count += grxsts.b.bcnt;
+ hc->xfer_buff += grxsts.b.bcnt;
+ }
+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
+ case DWC_GRXSTS_PKTSTS_CH_HALTED:
+ /* Handled in interrupt, just ignore data */
+ break;
+ default:
+ DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n",
+ grxsts.b.pktsts);
+ break;
+ }
+ return 1;
+}
+
+
+/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
+ * data packets may be written to the FIFO for OUT transfers. More requests
+ * may be written to the non-periodic request queue for IN transfers. This
+ * interrupt is enabled only in Slave mode. */
+int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *
+ _dwc_otg_hcd)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
+ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
+ DWC_OTG_TRANSACTION_NON_PERIODIC);
+ //schedule_work(&_dwc_otg_hcd->hcd_queue_work);
+ return 1;
+}
+
+
+/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
+ * packets may be written to the FIFO for OUT transfers. More requests may be
+ * written to the periodic request queue for IN transfers. This interrupt is
+ * enabled only in Slave mode. */
+int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *
+ _dwc_otg_hcd)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
+ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
+ DWC_OTG_TRANSACTION_PERIODIC);
+ //schedule_work(&_dwc_otg_hcd->hcd_queue_work);
+ return 1;
+}
+
+
+/** There are multiple conditions that can cause a port interrupt. This function
+ * determines which interrupt conditions have occurred and handles them
+ * appropriately. */
+int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t * _dwc_otg_hcd)
+{
+ int retval = 0;
+ hprt0_data_t hprt0;
+ hprt0_data_t hprt0_modify;
+ hprt0.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0);
+ hprt0_modify.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0);
+
+ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
+ * GINTSTS */
+ hprt0_modify.b.prtena = 0;
+ hprt0_modify.b.prtconndet = 0;
+ hprt0_modify.b.prtenchng = 0;
+ hprt0_modify.b.prtovrcurrchng = 0;
+
+ /* Port Connect Detected
+ * Set flag and clear if detected */
+ if (hprt0.b.prtconndet) {
+ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
+ "Port Connect Detected--\n", hprt0.d32);
+ _dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+ _dwc_otg_hcd->flags.b.port_connect_status = 1;
+ hprt0_modify.b.prtconndet = 1;
+
+ /* B-Device has connected, Delete the connection timer. */
+ del_timer(&_dwc_otg_hcd->conn_timer);
+
+ /* The Hub driver asserts a reset when it sees port connect
+ * status change flag
+ */
+ retval |= 1;
+ }
+
+ /* Port Enable Changed
+ * Clear if detected - Set internal flag if disabled */
+ if (hprt0.b.prtenchng) {
+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Enable Changed--\n", hprt0.d32);
+ hprt0_modify.b.prtenchng = 1;
+ if (hprt0.b.prtena == 1) {
+ int do_reset = 0;
+ dwc_otg_core_params_t * params =
+ _dwc_otg_hcd->core_if->core_params;
+ dwc_otg_core_global_regs_t * global_regs =
+ _dwc_otg_hcd->core_if->core_global_regs;
+ dwc_otg_host_if_t * host_if =
+ _dwc_otg_hcd->core_if->host_if;
+
+ /* Check if we need to adjust the PHY clock speed for
+ * low power and adjust it */
+ if (params->host_support_fs_ls_low_power) {
+ gusbcfg_data_t usbcfg;
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) ||
+ (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED)) {
+ /*
+ * Low power
+ */
+ hcfg_data_t hcfg;
+ if (usbcfg.b.phylpwrclksel == 0) {
+ /* Set PHY low power clock select for FS/LS devices */
+ usbcfg.b.phylpwrclksel = 1;
+ dwc_write_reg32(&global_regs->gusbcfg,usbcfg.d32);
+ do_reset = 1;
+ }
+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
+ if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) &&
+ (params->host_ls_low_power_phy_clk ==
+ DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
+ /* 6 MHZ */
+ DWC_DEBUGPL(DBG_CIL,"FS_PHY programming HCFG to 6 MHz (Low Power)\n");
+ if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) {
+ hcfg.b.fslspclksel = DWC_HCFG_6_MHZ;
+ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
+ do_reset = 1;
+ }
+ } else {
+ /* 48 MHZ */
+ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n");
+ if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) {
+ hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;
+ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
+ do_reset = 1;
+ }
+ }
+ } else {
+ /*
+ * Not low power
+ */
+ if (usbcfg.b.phylpwrclksel == 1) {
+ usbcfg.b.phylpwrclksel = 0;
+ dwc_write_reg32(&global_regs->gusbcfg,usbcfg.d32);
+ do_reset = 1;
+ }
+ }
+ if (do_reset) {
+ tasklet_schedule(_dwc_otg_hcd->reset_tasklet);
+ }
+ }
+ if (!do_reset) {
+ /* Port has been enabled set the reset change flag */
+ _dwc_otg_hcd->flags.b.port_reset_change = 1;
+ }
+ } else {
+ _dwc_otg_hcd->flags.b.port_enable_change = 1;
+ }
+ retval |= 1;
+ }
+
+ /** Overcurrent Change Interrupt */
+ if (hprt0.b.prtovrcurrchng) {
+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Overcurrent Changed--\n", hprt0.d32);
+ _dwc_otg_hcd->flags.b.port_over_current_change = 1;
+ hprt0_modify.b.prtovrcurrchng = 1;
+ retval |= 1;
+ }
+
+ /* Clear Port Interrupts */
+ dwc_write_reg32(_dwc_otg_hcd->core_if->host_if->hprt0,hprt0_modify.d32);
+ return retval;
+}
+
+/** This interrupt indicates that one or more host channels has a pending
+ * interrupt. There are multiple conditions that can cause each host channel
+ * interrupt. This function determines which conditions have occurred for each
+ * host channel interrupt and handles them appropriately. */
+int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t * _dwc_otg_hcd)
+{
+ int i;
+ int retval = 0;
+ haint_data_t haint;
+
+ /* Clear appropriate bits in HCINTn to clear the interrupt bit in
+ * GINTSTS */
+ haint.d32 = dwc_otg_read_host_all_channels_intr(_dwc_otg_hcd->core_if);
+ for (i = 0; i < _dwc_otg_hcd->core_if->core_params->host_channels;i++) {
+ if (haint.b2.chint & (1 << i)) {
+ retval |= dwc_otg_hcd_handle_hc_n_intr(_dwc_otg_hcd, i);
+ }
+ }
+ return retval;
+}
+
+/* Macro used to clear one channel interrupt */
+#define clear_hc_int(_hc_regs_,_intr_) \
+ do { \
+ hcint_data_t hcint_clear = { .d32 = 0}; \
+ hcint_clear.b._intr_ = 1; \
+ dwc_write_reg32(&((_hc_regs_)->hcint), hcint_clear.d32); \
+ } while (0)
+
+/*
+ * Macro used to disable one channel interrupt. Channel interrupts are
+ * disabled when the channel is halted or released by the interrupt handler.
+ * There is no need to handle further interrupts of that type until the
+ * channel is re-assigned. In fact, subsequent handling may cause crashes
+ * because the channel structures are cleaned up when the channel is released.
+ */
+#define disable_hc_int(_hc_regs_,_intr_) \
+ do { \
+ hcintmsk_data_t hcintmsk = {.d32 = 0}; \
+ hcintmsk.b._intr_ = 1; \
+ dwc_modify_reg32(&((_hc_regs_)->hcintmsk), hcintmsk.d32, 0); \
+ } while (0)
+
+/**
+ * Gets the actual length of a transfer after the transfer halts. _halt_status
+ * holds the reason for the halt.
+ *
+ * For IN transfers where _halt_status is DWC_OTG_HC_XFER_COMPLETE,
+ * *_short_read is set to 1 upon return if less than the requested
+ * number of bytes were transferred. Otherwise, *_short_read is set to 0 upon
+ * return. _short_read may also be NULL on entry, in which case it remains
+ * unchanged.
+ */
+static uint32_t get_actual_xfer_length(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status, int *_short_read)
+{
+ hctsiz_data_t hctsiz;
+ uint32_t length;
+ if (_short_read != NULL) {
+ *_short_read = 0;
+ }
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (_halt_status == DWC_OTG_HC_XFER_COMPLETE) {
+ if (_hc->ep_is_in) {
+ length = _hc->xfer_len - hctsiz.b.xfersize;
+ if (_short_read != NULL) {
+ *_short_read = (hctsiz.b.xfersize != 0);
+ }
+ } else if (_hc->qh->do_split) {
+ length = _qtd->ssplit_out_xfer_count;
+ } else {
+ length = _hc->xfer_len;
+ }
+ } else {
+ /*
+ * Must use the hctsiz.pktcnt field to determine how much data
+ * has been transferred. This field reflects the number of
+ * packets that have been transferred via the USB. This is
+ * always an integral number of packets if the transfer was
+ * halted before its normal completion. (Can't use the
+ * hctsiz.xfersize field because that reflects the number of
+ * bytes transferred via the AHB, not the USB).
+ */
+ length = (_hc->start_pkt_count - hctsiz.b.pktcnt) * _hc->max_packet;
+ }
+ return length;
+}
+
+/**
+ * Updates the state of the URB after a Transfer Complete interrupt on the
+ * host channel. Updates the actual_length field of the URB based on the
+ * number of bytes transferred via the host channel. Sets the URB status
+ * if the data transfer is finished.
+ *
+ * @return 1 if the data transfer specified by the URB is completely finished,
+ * 0 otherwise.
+ */
+static int update_urb_state_xfer_comp(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, struct urb *_urb,
+ dwc_otg_qtd_t * _qtd, int *status)
+{
+ int xfer_done = 0;
+ int short_read = 0;
+ _urb->actual_length += get_actual_xfer_length(_hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_COMPLETE, &short_read);
+ if (short_read || (_urb->actual_length == _urb->transfer_buffer_length)) {
+ xfer_done = 1;
+ if (short_read && (_urb->transfer_flags & URB_SHORT_NOT_OK)) {
+ *status = -EREMOTEIO;
+ } else {
+ *status = 0;
+ }
+ }
+
+#ifdef CONFIG_DWC_DEBUG
+ {
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
+ __func__, (_hc->ep_is_in ? "IN" : "OUT"), _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", _hc->xfer_len);
+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n",hctsiz.b.xfersize);
+ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
+ _urb->transfer_buffer_length);
+ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n",
+ _urb->actual_length);
+ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n",
+ short_read, xfer_done);
+ }
+#endif /* */
+ return xfer_done;
+}
+
+/*
+ * Save the starting data toggle for the next transfer. The data toggle is
+ * saved in the QH for non-control transfers and it's saved in the QTD for
+ * control transfers.
+ */
+static void save_data_toggle(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd)
+{
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
+ dwc_otg_qh_t * qh = _hc->qh;
+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
+ qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+ } else {
+ qh->data_toggle = DWC_OTG_HC_PID_DATA1;
+ }
+ } else {
+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
+ } else {
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
+ }
+ }
+}
+
+/**
+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
+ * still linked to the QH, the QH is added to the end of the inactive
+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic
+ * schedule if no more QTDs are linked to the QH.
+ */
+static void deactivate_qh(dwc_otg_hcd_t * _hcd,
+ dwc_otg_qh_t * _qh, int free_qtd)
+{
+ int continue_split = 0;
+ dwc_otg_qtd_t * qtd;
+ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, _hcd, _qh,
+ free_qtd);
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+ if (qtd->complete_split) {
+ continue_split = 1;
+ } else if ((qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID) ||
+ (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END)) {
+ continue_split = 1;
+ }
+
+ if (free_qtd) {
+ /*
+ * Note that this was previously a call to
+ * dwc_otg_hcd_qtd_remove_and_free(qtd), which frees the qtd.
+ * However, that call frees the qtd memory, and we continue in the
+ * interrupt logic to access it many more times, including writing
+ * to it. With slub debugging on, it is clear that we were writing
+ * to memory we had freed.
+ * Call this instead, and now I have moved the freeing of the memory to
+ * the end of processing this interrupt.
+ */
+ dwc_otg_hcd_qtd_remove(qtd);
+
+ continue_split = 0;
+ }
+ _qh->channel = NULL;
+ _qh->qtd_in_process = NULL;
+ dwc_otg_hcd_qh_deactivate(_hcd, _qh, continue_split);
+}
+
+/**
+ * Updates the state of an Isochronous URB when the transfer is stopped for
+ * any reason. The fields of the current entry in the frame descriptor array
+ * are set based on the transfer state and the input _halt_status. Completes
+ * the Isochronous URB if all the URB frames have been completed.
+ *
+ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
+ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
+ */
+static dwc_otg_halt_status_e update_isoc_urb_state(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status)
+{
+ struct urb *urb = _qtd->urb;
+ dwc_otg_halt_status_e ret_val = _halt_status;
+ struct usb_iso_packet_descriptor *frame_desc;
+ frame_desc = &urb->iso_frame_desc[_qtd->isoc_frame_index];
+ switch (_halt_status) {
+ case DWC_OTG_HC_XFER_COMPLETE:
+ frame_desc->status = 0;
+ frame_desc->actual_length =
+ get_actual_xfer_length(_hc, _hc_regs, _qtd, _halt_status,NULL);
+ break;
+ case DWC_OTG_HC_XFER_FRAME_OVERRUN:
+ urb->error_count++;
+ if (_hc->ep_is_in) {
+ frame_desc->status = -ENOSR;
+ } else {
+ frame_desc->status = -ECOMM;
+ }
+ frame_desc->actual_length = 0;
+ break;
+ case DWC_OTG_HC_XFER_BABBLE_ERR:
+ urb->error_count++;
+ frame_desc->status = -EOVERFLOW;
+
+ /* Don't need to update actual_length in this case. */
+ break;
+ case DWC_OTG_HC_XFER_XACT_ERR:
+ urb->error_count++;
+ frame_desc->status = -EPROTO;
+ frame_desc->actual_length =
+ get_actual_xfer_length(_hc, _hc_regs, _qtd, _halt_status,NULL);
+ default:
+ DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__, _halt_status);
+ BUG();
+ break;
+ }
+ if (++_qtd->isoc_frame_index == urb->number_of_packets) {
+ /*
+ * urb->status is not used for isoc transfers.
+ * The individual frame_desc statuses are used instead.
+ */
+ dwc_otg_hcd_complete_urb(_hcd, urb, 0);
+ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
+ } else {
+ ret_val = DWC_OTG_HC_XFER_COMPLETE;
+ }
+ return ret_val;
+}
+
+/**
+ * Releases a host channel for use by other transfers. Attempts to select and
+ * queue more transactions since at least one host channel is available.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _hc The host channel to release.
+ * @param _qtd The QTD associated with the host channel. This QTD may be freed
+ * if the transfer is complete or an error has occurred.
+ * @param _halt_status Reason the channel is being released. This status
+ * determines the actions taken by this function.
+ */
+
+static void release_channel(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_qtd_t * _qtd, dwc_otg_halt_status_e _halt_status, int *must_free) {
+ dwc_otg_transaction_type_e tr_type;
+ int free_qtd;
+ dwc_otg_qh_t * _qh;
+ int deact = 1;
+ int retry_delay = 1;
+
+ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", __func__,
+ _hc->hc_num, _halt_status);
+ switch (_halt_status) {
+ case DWC_OTG_HC_XFER_NYET:
+ case DWC_OTG_HC_XFER_NAK:
+ if (_halt_status == DWC_OTG_HC_XFER_NYET) {
+ retry_delay = nyet_deferral_delay;
+ } else {
+ retry_delay = nak_deferral_delay;
+ }
+ free_qtd = 0;
+ if (deferral_on && _hc->do_split) {
+ _qh = _hc->qh;
+ if (_qh) {
+ deact = dwc_otg_hcd_qh_deferr(_hcd, _qh , retry_delay);
+ }
+ }
+ break;
+
+ case DWC_OTG_HC_XFER_URB_COMPLETE:
+ free_qtd = 1;
+ break;
+ case DWC_OTG_HC_XFER_AHB_ERR:
+ case DWC_OTG_HC_XFER_STALL:
+ case DWC_OTG_HC_XFER_BABBLE_ERR:
+ free_qtd = 1;
+ break;
+ case DWC_OTG_HC_XFER_XACT_ERR:
+ if (_qtd->error_count >= 3) {
+ DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n");
+ free_qtd = 1;
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPROTO);
+ } else {
+ free_qtd = 0;
+ }
+ break;
+ case DWC_OTG_HC_XFER_URB_DEQUEUE:
+ /*
+ * The QTD has already been removed and the QH has been
+ * deactivated. Don't want to do anything except release the
+ * host channel and try to queue more transfers.
+ */
+ goto cleanup;
+ case DWC_OTG_HC_XFER_NO_HALT_STATUS:
+ DWC_ERROR("%s: No halt_status, channel %d\n", __func__,
+ _hc->hc_num);
+ free_qtd = 0;
+ break;
+ default:
+ free_qtd = 0;
+ break;
+ }
+ *must_free = free_qtd;
+ if (deact) {
+ deactivate_qh(_hcd, _hc->qh, free_qtd);
+ }
+cleanup:
+ /*
+ * Release the host channel for use by other transfers. The cleanup
+ * function clears the channel interrupt enables and conditions, so
+ * there's no need to clear the Channel Halted interrupt separately.
+ */
+ dwc_otg_hc_cleanup(_hcd->core_if, _hc);
+ list_add_tail(&_hc->hc_list_entry, &_hcd->free_hc_list);
+ switch (_hc->ep_type) {
+ case DWC_OTG_EP_TYPE_CONTROL:
+ case DWC_OTG_EP_TYPE_BULK:
+ _hcd->non_periodic_channels--;
+ break;
+ default:
+ /*
+ * Don't release reservations for periodic channels here.
+ * That's done when a periodic transfer is descheduled (i.e.
+ * when the QH is removed from the periodic schedule).
+ */
+ break;
+ }
+ /* Try to queue more transfers now that there's a free channel, */
+ /* unless erratum_usb09_patched is set */
+ if (!erratum_usb09_patched) {
+ tr_type = dwc_otg_hcd_select_transactions(_hcd);
+ if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+ dwc_otg_hcd_queue_transactions(_hcd, tr_type);
+ }
+ }
+}
+
+/**
+ * Halts a host channel. If the channel cannot be halted immediately because
+ * the request queue is full, this function ensures that the FIFO empty
+ * interrupt for the appropriate queue is enabled so that the halt request can
+ * be queued when there is space in the request queue.
+ *
+ * This function may also be called in DMA mode. In that case, the channel is
+ * simply released since the core always halts the channel automatically in
+ * DMA mode.
+ */
+static void halt_channel(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_qtd_t * _qtd, dwc_otg_halt_status_e _halt_status, int *must_free)
+{
+ if (_hcd->core_if->dma_enable) {
+ release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ return;
+ }
+
+ /* Slave mode processing... */
+ dwc_otg_hc_halt(_hcd->core_if, _hc, _halt_status);
+ if (_hc->halt_on_queue) {
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ dwc_otg_core_global_regs_t * global_regs;
+ global_regs = _hcd->core_if->core_global_regs;
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
+ /*
+ * Make sure the Non-periodic Tx FIFO empty interrupt
+ * is enabled so that the non-periodic schedule will
+ * be processed.
+ */
+ gintmsk.b.nptxfempty = 1;
+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
+ } else {
+ /*
+ * Move the QH from the periodic queued schedule to
+ * the periodic assigned schedule. This allows the
+ * halt to be queued when the periodic schedule is
+ * processed.
+ */
+ list_move(&_hc->qh->qh_list_entry,
+ &_hcd->periodic_sched_assigned);
+
+ /*
+ * Make sure the Periodic Tx FIFO Empty interrupt is
+ * enabled so that the periodic schedule will be
+ * processed.
+ */
+ gintmsk.b.ptxfempty = 1;
+ dwc_modify_reg32(&global_regs->gintmsk, 0,gintmsk.d32);
+ }
+ }
+}
+
+/**
+ * Performs common cleanup for non-periodic transfers after a Transfer
+ * Complete interrupt. This function should be called after any endpoint type
+ * specific handling is finished to release the host channel.
+ */
+static void complete_non_periodic_xfer(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status, int *must_free)
+{
+ hcint_data_t hcint;
+ _qtd->error_count = 0;
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ if (hcint.b.nyet) {
+ /*
+ * Got a NYET on the last transaction of the transfer. This
+ * means that the endpoint should be in the PING state at the
+ * beginning of the next transfer.
+ */
+ _hc->qh->ping_state = 1;
+ clear_hc_int(_hc_regs, nyet);
+ }
+
+ /*
+ * Always halt and release the host channel to make it available for
+ * more transfers. There may still be more phases for a control
+ * transfer or more data packets for a bulk transfer at this point,
+ * but the host channel is still halted. A channel will be reassigned
+ * to the transfer when the non-periodic schedule is processed after
+ * the channel is released. This allows transactions to be queued
+ * properly via dwc_otg_hcd_queue_transactions, which also enables the
+ * Tx FIFO Empty interrupt if necessary.
+ */
+ if (_hc->ep_is_in) {
+ /*
+ * IN transfers in Slave mode require an explicit disable to
+ * halt the channel. (In DMA mode, this call simply releases
+ * the channel.)
+ */
+ halt_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ } else {
+ /*
+ * The channel is automatically disabled by the core for OUT
+ * transfers in Slave mode.
+ */
+ release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ }
+}
+
+/**
+ * Performs common cleanup for periodic transfers after a Transfer Complete
+ * interrupt. This function should be called after any endpoint type specific
+ * handling is finished to release the host channel.
+ */
+static void complete_periodic_xfer(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status, int *must_free)
+{
+ hctsiz_data_t hctsiz;
+ _qtd->error_count = 0;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (!_hc->ep_is_in || hctsiz.b.pktcnt == 0) {
+ /* Core halts channel in these cases. */
+ release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ } else {
+ /* Flush any outstanding requests from the Tx queue. */
+ halt_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ }
+}
+
+/**
+ * Handles a host channel Transfer Complete interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ int urb_xfer_done;
+ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ struct urb *urb = _qtd->urb;
+ int pipe_type = usb_pipetype(urb->pipe);
+ int status = -EINPROGRESS;
+
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Transfer Complete--\n", _hc->hc_num);
+
+ /*
+ * Handle xfer complete on CSPLIT.
+ */
+ if (_hc->qh->do_split) {
+ _qtd->complete_split = 0;
+ }
+
+ /* Update the QTD and URB states. */
+ switch (pipe_type) {
+ case PIPE_CONTROL:
+ switch (_qtd->control_phase) {
+ case DWC_OTG_CONTROL_SETUP:
+ if (urb->transfer_buffer_length > 0) {
+ _qtd->control_phase = DWC_OTG_CONTROL_DATA;
+ } else {
+ _qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+ }
+ DWC_DEBUGPL(DBG_HCDV,
+ " Control setup transaction done\n");
+ halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ break;
+ case DWC_OTG_CONTROL_DATA:{
+ urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs,urb, _qtd, &status);
+ if (urb_xfer_done) {
+ _qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+ DWC_DEBUGPL(DBG_HCDV," Control data transfer done\n");
+ } else {
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ }
+ halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ break;
+ }
+ case DWC_OTG_CONTROL_STATUS:
+ DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n");
+ if (status == -EINPROGRESS) {
+ status = 0;
+ }
+ dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
+ break;
+ }
+ complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,
+ halt_status, must_free);
+ break;
+ case PIPE_BULK:
+ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n");
+ urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status);
+ if (urb_xfer_done) {
+ dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
+ } else {
+ halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ }
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,halt_status, must_free);
+ break;
+ case PIPE_INTERRUPT:
+ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n");
+ update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status);
+ /*
+ * Interrupt URB is done on the first transfer complete
+ * interrupt.
+ */
+ dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_URB_COMPLETE, must_free);
+ break;
+ case PIPE_ISOCHRONOUS:
+ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n");
+ if (_qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) {
+ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_COMPLETE);
+ }
+ complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd, halt_status, must_free);
+ break;
+ }
+ disable_hc_int(_hc_regs, xfercompl);
+ return 1;
+}
+
+/**
+ * Handles a host channel STALL interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_stall_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ struct urb *urb = _qtd->urb;
+ int pipe_type = usb_pipetype(urb->pipe);
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "STALL Received--\n", _hc->hc_num);
+ if (pipe_type == PIPE_CONTROL) {
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE);
+ }
+ if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) {
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE);
+ /*
+ * USB protocol requires resetting the data toggle for bulk
+ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
+ * setup command is issued to the endpoint. Anticipate the
+ * CLEAR_FEATURE command since a STALL has occurred and reset
+ * the data toggle now.
+ */
+ _hc->qh->data_toggle = 0;
+ }
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_STALL, must_free);
+ disable_hc_int(_hc_regs, stall);
+ return 1;
+}
+
+/*
+ * Updates the state of the URB when a transfer has been stopped due to an
+ * abnormal condition before the transfer completes. Modifies the
+ * actual_length field of the URB to reflect the number of bytes that have
+ * actually been transferred via the host channel.
+ */
+static void update_urb_state_xfer_intr(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, struct urb *_urb, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status)
+{
+ uint32_t bytes_transferred =
+ get_actual_xfer_length(_hc, _hc_regs, _qtd, _halt_status, NULL);
+ _urb->actual_length += bytes_transferred;
+
+#ifdef CONFIG_DWC_DEBUG
+ {
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
+ __func__, (_hc->ep_is_in ? "IN" : "OUT"),_hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " _hc->start_pkt_count %d\n",
+ _hc->start_pkt_count);
+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
+ DWC_DEBUGPL(DBG_HCDV, " _hc->max_packet %d\n",_hc->max_packet);
+ DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n",
+ bytes_transferred);
+ DWC_DEBUGPL(DBG_HCDV, " _urb->actual_length %d\n",
+ _urb->actual_length);
+ DWC_DEBUGPL(DBG_HCDV, " _urb->transfer_buffer_length %d\n",
+ _urb->transfer_buffer_length);
+ }
+#endif /* */
+}
+
+/**
+ * Handles a host channel NAK interrupt. This handler may be called in either
+ * DMA mode or Slave mode.
+ */
+static int32_t handle_hc_nak_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) {
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "NAK Received--\n", _hc->hc_num);
+ /*
+ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
+ * interrupt. Re-start the SSPLIT transfer.
+ */
+ if (_hc->do_split) {
+ if (_hc->complete_split) {
+ _qtd->error_count = 0;
+ }
+ _qtd->complete_split = 0;
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
+ goto handle_nak_done;
+ }
+ switch (usb_pipetype(_qtd->urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ if (_hcd->core_if->dma_enable && _hc->ep_is_in) {
+ /*
+ * NAK interrupts are enabled on bulk/control IN
+ * transfers in DMA mode for the sole purpose of
+ * resetting the error count after a transaction error
+ * occurs. The core will continue transferring data.
+ */
+ _qtd->error_count = 0;
+ goto handle_nak_done;
+ }
+
+ /*
+ * NAK interrupts normally occur during OUT transfers in DMA
+ * or Slave mode. For IN transfers, more requests will be
+ * queued as request queue space is available.
+ */
+ _qtd->error_count = 0;
+ if (!_hc->qh->ping_state) {
+ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb,
+ _qtd, DWC_OTG_HC_XFER_NAK);
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ if (_qtd->urb->dev->speed == USB_SPEED_HIGH) {
+ _hc->qh->ping_state = 1;
+ }
+ }
+
+ /*
+ * Halt the channel so the transfer can be re-started from
+ * the appropriate point or the PING protocol will
+ * start/continue.
+ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
+ break;
+ case PIPE_INTERRUPT:
+ _qtd->error_count = 0;
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
+ break;
+ case PIPE_ISOCHRONOUS:
+ /* Should never get called for isochronous transfers. */
+ BUG();
+ break;
+ }
+ handle_nak_done:disable_hc_int(_hc_regs, nak);
+ clear_hc_int(_hc_regs, nak);
+ return 1;
+}
+
+/**
+ * Handles a host channel ACK interrupt. This interrupt is enabled when
+ * performing the PING protocol in Slave mode, when errors occur during
+ * either Slave mode or DMA mode, and during Start Split transactions.
+ */
+static int32_t handle_hc_ack_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "ACK Received--\n", _hc->hc_num);
+ if (_hc->do_split) {
+ /*
+ * Handle ACK on SSPLIT.
+ * ACK should not occur in CSPLIT.
+ */
+ if ((!_hc->ep_is_in) && (_hc->data_pid_start != DWC_OTG_HC_PID_SETUP)) {
+ _qtd->ssplit_out_xfer_count = _hc->xfer_len;
+ }
+ if (!(_hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !_hc->ep_is_in)) {
+ /* Don't need complete for isochronous out transfers. */
+ _qtd->complete_split = 1;
+ }
+
+ /* ISOC OUT */
+ if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && !_hc->ep_is_in) {
+ switch (_hc->xact_pos) {
+ case DWC_HCSPLIT_XACTPOS_ALL:
+ break;
+ case DWC_HCSPLIT_XACTPOS_END:
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
+ _qtd->isoc_split_offset = 0;
+ break;
+ case DWC_HCSPLIT_XACTPOS_BEGIN:
+ case DWC_HCSPLIT_XACTPOS_MID:
+ /*
+ * For BEGIN or MID, calculate the length for
+ * the next microframe to determine the correct
+ * SSPLIT token, either MID or END.
+ */
+ do {
+ struct usb_iso_packet_descriptor *frame_desc;
+ frame_desc = &_qtd->urb->iso_frame_desc[_qtd->isoc_frame_index];
+ _qtd->isoc_split_offset += 188;
+ if ((frame_desc->length - _qtd->isoc_split_offset) <=188) {
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END;
+ } else {
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID;
+ }
+ } while (0);
+ break;
+ }
+ } else {
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free);
+ }
+ } else {
+ _qtd->error_count = 0;
+ if (_hc->qh->ping_state) {
+ _hc->qh->ping_state = 0;
+
+ /*
+ * Halt the channel so the transfer can be re-started
+ * from the appropriate point. This only happens in
+ * Slave mode. In DMA mode, the ping_state is cleared
+ * when the transfer is started because the core
+ * automatically executes the PING, then the transfer.
+ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free);
+ }
+ }
+
+ /*
+ * If the ACK occurred when _not_ in the PING state, let the channel
+ * continue transferring data after clearing the error count.
+ */
+ disable_hc_int(_hc_regs, ack);
+ clear_hc_int(_hc_regs, ack);
+ return 1;
+}
+
+/**
+ * Handles a host channel NYET interrupt. This interrupt should only occur on
+ * Bulk and Control OUT endpoints and for complete split transactions. If a
+ * NYET occurs at the same time as a Transfer Complete interrupt, it is
+ * handled in the xfercomp interrupt handler, not here. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "NYET Received--\n", _hc->hc_num);
+
+ /*
+ * NYET on CSPLIT
+ * re-do the CSPLIT immediately on non-periodic
+ */
+ if ((_hc->do_split) && (_hc->complete_split)) {
+ if ((_hc->ep_type == DWC_OTG_EP_TYPE_INTR) ||
+ (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC)) {
+ int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd
+ (_hcd));
+ if (dwc_full_frame_num(frnum) !=
+ dwc_full_frame_num(_hc->qh->sched_frame)) {
+
+ /*
+ * No longer in the same full speed frame.
+ * Treat this as a transaction error.
+ */
+#if 0
+ /** @todo Fix system performance so this can
+ * be treated as an error. Right now complete
+ * splits cannot be scheduled precisely enough
+ * due to other system activity, so this error
+ * occurs regularly in Slave mode.
+ */
+ _qtd->error_count++;
+
+#endif /* */
+ _qtd->complete_split = 0;
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+
+ /** @todo add support for isoc release */
+ goto handle_nyet_done;
+ }
+ }
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free);
+ goto handle_nyet_done;
+ }
+ _hc->qh->ping_state = 1;
+ _qtd->error_count = 0;
+ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb, _qtd,
+ DWC_OTG_HC_XFER_NYET);
+ save_data_toggle(_hc, _hc_regs, _qtd);
+
+ /*
+ * Halt the channel and re-start the transfer so the PING
+ * protocol will start.
+ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free);
+handle_nyet_done:
+ disable_hc_int(_hc_regs, nyet);
+ clear_hc_int(_hc_regs, nyet);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel babble interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_babble_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Babble Error--\n", _hc->hc_num);
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EOVERFLOW);
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_BABBLE_ERR, must_free);
+ } else {
+ dwc_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_BABBLE_ERR);
+ halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
+ }
+ disable_hc_int(_hc_regs, bblerr);
+ return 1;
+}
+
+/**
+ * Handles a host channel AHB error interrupt. This handler is only called in
+ * DMA mode.
+ */
+static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd)
+{
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+ hctsiz_data_t hctsiz;
+ uint32_t hcdma;
+ struct urb *urb = _qtd->urb;
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "AHB Error--\n", _hc->hc_num);
+ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+ hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt);
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ hcdma = dwc_read_reg32(&_hc_regs->hcdma);
+ DWC_ERROR("AHB ERROR, Channel %d\n", _hc->hc_num);
+ DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+ DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n");
+ DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe));
+ DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
+ (usb_pipein(urb->pipe) ? "IN" : "OUT"));
+ DWC_ERROR(" Endpoint type: %s\n", ( {
+ char *pipetype;
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ pipetype = "CONTROL"; break;
+ case PIPE_BULK:
+ pipetype = "BULK"; break;
+ case PIPE_INTERRUPT:
+ pipetype = "INTERRUPT"; break;
+ case PIPE_ISOCHRONOUS:
+ pipetype = "ISOCHRONOUS"; break;
+ default:
+ pipetype = "UNKNOWN"; break;
+ };
+ pipetype;
+ } )) ;
+ DWC_ERROR(" Speed: %s\n", ( {
+ char *speed;
+ switch (urb->dev->speed) {
+ case USB_SPEED_HIGH:
+ speed = "HIGH"; break;
+ case USB_SPEED_FULL:
+ speed = "FULL"; break;
+ case USB_SPEED_LOW:
+ speed = "LOW"; break;
+ default:
+ speed = "UNKNOWN"; break;
+ };
+ speed;
+ } )) ;
+ DWC_ERROR(" Max packet size: %d\n",
+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+ DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length);
+ DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n",
+ urb->transfer_buffer, (void *)(u32)urb->transfer_dma);
+ DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n", urb->setup_packet,
+ (void *)(u32)urb->setup_dma);
+ DWC_ERROR(" Interval: %d\n", urb->interval);
+ dwc_otg_hcd_complete_urb(_hcd, urb, -EIO);
+
+ /*
+ * Force a channel halt. Don't call halt_channel because that won't
+ * write to the HCCHARn register in DMA mode to force the halt.
+ */
+ dwc_otg_hc_halt(_hcd->core_if, _hc, DWC_OTG_HC_XFER_AHB_ERR);
+ disable_hc_int(_hc_regs, ahberr);
+ return 1;
+}
+
+/**
+ * Handles a host channel transaction error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Transaction Error--\n", _hc->hc_num);
+ switch (usb_pipetype(_qtd->urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ _qtd->error_count++;
+ if (!_hc->qh->ping_state) {
+ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb,
+ _qtd, DWC_OTG_HC_XFER_XACT_ERR);
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ if (!_hc->ep_is_in && _qtd->urb->dev->speed == USB_SPEED_HIGH) {
+ _hc->qh->ping_state = 1;
+ }
+ }
+
+ /*
+ * Halt the channel so the transfer can be re-started from
+ * the appropriate point or the PING protocol will start.
+ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+ break;
+ case PIPE_INTERRUPT:
+ _qtd->error_count++;
+ if ((_hc->do_split) && (_hc->complete_split)) {
+ _qtd->complete_split = 0;
+ }
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+ break;
+ case PIPE_ISOCHRONOUS:
+ {
+ dwc_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_XACT_ERR);
+ halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
+ }
+ break;
+ }
+ disable_hc_int(_hc_regs, xacterr);
+ return 1;
+}
+
+/**
+ * Handles a host channel frame overrun interrupt. This handler may be called
+ * in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Frame Overrun--\n", _hc->hc_num);
+ switch (usb_pipetype(_qtd->urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ break;
+ case PIPE_INTERRUPT:
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN, must_free);
+ break;
+ case PIPE_ISOCHRONOUS:
+ {
+ dwc_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_FRAME_OVERRUN);
+ halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
+ }
+ break;
+ }
+ disable_hc_int(_hc_regs, frmovrun);
+ return 1;
+}
+
+/**
+ * Handles a host channel data toggle error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Data Toggle Error--\n", _hc->hc_num);
+ if (_hc->ep_is_in) {
+ _qtd->error_count = 0;
+ } else {
+		DWC_ERROR("Data Toggle Error on OUT transfer, "
+ "channel %d\n", _hc->hc_num);
+ }
+ disable_hc_int(_hc_regs, datatglerr);
+ return 1;
+}
+
+#ifdef CONFIG_DWC_DEBUG
+/**
+ * This function is for debug only. It checks that a valid halt status is set
+ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
+ * taken and a warning is issued.
+ * @return 1 if halt status is ok, 0 otherwise.
+ */
+static inline int halt_status_ok(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd)
+{
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ hcsplt_data_t hcsplt;
+ if (_hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
+ /*
+ * This code is here only as a check. This condition should
+ * never happen. Ignore the halt if it does occur.
+ */
+ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk);
+ hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt);
+ DWC_WARN("%s: _hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, "
+ "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
+ "hcint 0x%08x, hcintmsk 0x%08x, "
+ "hcsplt 0x%08x, qtd->complete_split %d\n", __func__,
+ _hc->hc_num, hcchar.d32, hctsiz.d32, hcint.d32,
+ hcintmsk.d32, hcsplt.d32, _qtd->complete_split);
+ DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
+ __func__, _hc->hc_num);
+ DWC_WARN("\n");
+ clear_hc_int(_hc_regs, chhltd);
+ return 0;
+ }
+
+ /*
+ * This code is here only as a check. hcchar.chdis should
+ * never be set when the halt interrupt occurs. Halt the
+ * channel again if it does occur.
+ */
+ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+ if (hcchar.b.chdis) {
+ DWC_WARN("%s: hcchar.chdis set unexpectedly, "
+ "hcchar 0x%08x, trying to halt again\n", __func__,
+ hcchar.d32);
+ clear_hc_int(_hc_regs, chhltd);
+ _hc->halt_pending = 0;
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+ return 0;
+ }
+ return 1;
+}
+#endif /* */
+
+/**
+ * Handles a host Channel Halted interrupt in DMA mode. This handler
+ * determines the reason the channel halted and proceeds accordingly.
+ */
+static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ if (_hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
+ _hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
+ /*
+ * Just release the channel. A dequeue can happen on a
+ * transfer timeout. In the case of an AHB Error, the channel
+ * was forced to halt because there's no way to gracefully
+ * recover.
+ */
+ release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+ return;
+ }
+
+ /* Read the HCINTn register to determine the cause for the halt. */
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk);
+ if (hcint.b.xfercomp) {
+
+ /** @todo This is here because of a possible hardware bug. Spec
+ * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
+ * interrupt w/ACK bit set should occur, but I only see the
+ * XFERCOMP bit, even with it masked out. This is a workaround
+ * for that behavior. Should fix this when hardware is fixed.
+ */
+ if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && (!_hc->ep_is_in)) {
+ handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ }
+ handle_hc_xfercomp_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.stall) {
+ handle_hc_stall_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.xacterr) {
+ /*
+ * Must handle xacterr before nak or ack. Could get a xacterr
+ * at the same time as either of these on a BULK/CONTROL OUT
+ * that started with a PING. The xacterr takes precedence.
+ */
+ handle_hc_xacterr_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.nyet) {
+ /*
+ * Must handle nyet before nak or ack. Could get a nyet at the
+ * same time as either of those on a BULK/CONTROL OUT that
+ * started with a PING. The nyet takes precedence.
+ */
+ handle_hc_nyet_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.bblerr) {
+ handle_hc_babble_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.frmovrun) {
+ handle_hc_frmovrun_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.datatglerr) {
+ handle_hc_datatglerr_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.nak && !hcintmsk.b.nak) {
+ /*
+ * If nak is not masked, it's because a non-split IN transfer
+ * is in an error state. In that case, the nak is handled by
+ * the nak interrupt handler, not here. Handle nak here for
+ * BULK/CONTROL OUT transfers, which halt on a NAK to allow
+ * rewinding the buffer pointer.
+ */
+ handle_hc_nak_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.ack && !hcintmsk.b.ack) {
+ /*
+ * If ack is not masked, it's because a non-split IN transfer
+ * is in an error state. In that case, the ack is handled by
+ * the ack interrupt handler, not here. Handle ack here for
+ * split transfers. Start splits halt on ACK.
+ */
+ handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else {
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /*
+ * A periodic transfer halted with no other channel
+ * interrupts set. Assume it was halted by the core
+ * because it could not be completed in its scheduled
+ * (micro)frame.
+ */
+#ifdef CONFIG_DWC_DEBUG
+ DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n",
+ __func__, _hc->hc_num);
+
+#endif /* */
+ halt_channel(_hcd, _hc, _qtd,
+ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, must_free);
+ } else {
+ DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
+ "for halting is unknown, nyet %d, hcint 0x%08x, intsts 0x%08x\n",
+ __func__, _hc->hc_num, hcint.b.nyet, hcint.d32,
+ dwc_read_reg32(&_hcd->core_if->core_global_regs->gintsts));
+ }
+ }
+}
+
+/**
+ * Handles a host channel Channel Halted interrupt.
+ *
+ * In slave mode, this handler is called only when the driver specifically
+ * requests a halt. This occurs during handling other host channel interrupts
+ * (e.g. nak, xacterr, stall, nyet, etc.).
+ *
+ * In DMA mode, this is the interrupt that occurs when the core has finished
+ * processing a transfer on a channel. Other host channel interrupts (except
+ * ahberr) are disabled in DMA mode.
+ */
+static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Channel Halted--\n", _hc->hc_num);
+ if (_hcd->core_if->dma_enable) {
+ handle_hc_chhltd_intr_dma(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else {
+#ifdef CONFIG_DWC_DEBUG
+ if (!halt_status_ok(_hcd, _hc, _hc_regs, _qtd)) {
+ return 1;
+ }
+#endif /* */
+ release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+ }
+ clear_hc_int(_hc_regs, chhltd);
+ return 1;
+}
+
+/** Handles interrupt for a specific Host Channel */
+int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t * _dwc_otg_hcd, uint32_t _num)
+{
+ int must_free = 0;
+ int retval = 0;
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ dwc_hc_t * hc;
+ dwc_otg_hc_regs_t * hc_regs;
+ dwc_otg_qtd_t * qtd;
+ DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n",_num);
+ hc = _dwc_otg_hcd->hc_ptr_array[_num];
+ hc_regs = _dwc_otg_hcd->core_if->host_if->hc_regs[_num];
+ qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
+ DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+ hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
+ hcint.d32 = hcint.d32 & hcintmsk.d32;
+ if (!_dwc_otg_hcd->core_if->dma_enable) {
+ if ((hcint.b.chhltd) && (hcint.d32 != 0x2)) {
+ hcint.b.chhltd = 0;
+ }
+ }
+ if (hcint.b.xfercomp) {
+ retval |= handle_hc_xfercomp_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ /*
+ * If NYET occurred at same time as Xfer Complete, the NYET is
+ * handled by the Xfer Complete interrupt handler. Don't want
+ * to call the NYET interrupt handler in this case.
+ */
+ hcint.b.nyet = 0;
+ }
+ if (hcint.b.chhltd) {
+ retval |= handle_hc_chhltd_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.ahberr) {
+ retval |= handle_hc_ahberr_intr(_dwc_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.stall) {
+ retval |= handle_hc_stall_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.nak) {
+ retval |= handle_hc_nak_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.ack) {
+ retval |= handle_hc_ack_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.nyet) {
+ retval |= handle_hc_nyet_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.xacterr) {
+ retval |= handle_hc_xacterr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.bblerr) {
+ retval |= handle_hc_babble_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.frmovrun) {
+ retval |= handle_hc_frmovrun_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.datatglerr) {
+ retval |= handle_hc_datatglerr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ /*
+ * Logic to free the qtd here, at the end of the hc intr
+ * processing, if the handling of this interrupt determined
+ * that it needs to be freed.
+ */
+ if (must_free) {
+ /* Free the qtd here now that we are done using it. */
+ dwc_otg_hcd_qtd_free(qtd);
+ }
+ return retval;
+}
+
+#endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c
index b33b32e8468..3bee609586a 100644
--- a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c
@@ -138,6 +138,8 @@ void dwc_otg_hcd_qh_init(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh,
/* FS/LS Enpoint on HS Hub
* NOT virtual root hub */
_qh->do_split = 0;
+ _qh->speed = _urb->dev->speed;
+
if (((_urb->dev->speed == USB_SPEED_LOW) ||
(_urb->dev->speed == USB_SPEED_FULL)) &&
(_urb->dev->tt) && (_urb->dev->tt->hub) && (_urb->dev->tt->hub->devnum != 1)) {
@@ -229,71 +231,157 @@ void dwc_otg_hcd_qh_init(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh,
}
/**
- * Checks that a channel is available for a periodic transfer.
- *
- * @return 0 if successful, negative error code otherise.
+ * Microframe scheduler
+ * track the total use in hcd->frame_usecs
+ * keep each qh use in qh->frame_usecs
+ * when surrendering the qh, donate its time back
+ */
+const unsigned short max_uframe_usecs[]={ 100, 100, 100, 100, 100, 100, 30, 0 };
+
+/*
+ * called from dwc_otg_hcd.c:dwc_otg_hcd_init
*/
-static int periodic_channel_available(dwc_otg_hcd_t * _hcd)
+int init_hcd_usecs(dwc_otg_hcd_t *_hcd)
{
- /*
- * Currently assuming that there is a dedicated host channnel for each
- * periodic transaction plus at least one host channel for
- * non-periodic transactions.
- */
- int status;
- int num_channels;
- num_channels = _hcd->core_if->core_params->host_channels;
- if ((_hcd->periodic_channels + _hcd->non_periodic_channels <
- num_channels) && (_hcd->periodic_channels < num_channels - 1)) {
- status = 0;
- } else {
- DWC_NOTICE("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
- __func__, num_channels, _hcd->periodic_channels,
- _hcd->non_periodic_channels);
- status = -ENOSPC;
+ int i;
+ for (i=0; i<8; i++) {
+ _hcd->frame_usecs[i] = max_uframe_usecs[i];
}
- return status;
+ return 0;
}
-/**
- * Checks that there is sufficient bandwidth for the specified QH in the
- * periodic schedule. For simplicity, this calculation assumes that all the
- * transfers in the periodic schedule may occur in the same (micro)frame.
- *
- * @param _hcd The HCD state structure for the DWC OTG controller.
- * @param _qh QH containing periodic bandwidth required.
- *
- * @return 0 if successful, negative error code otherwise.
+static int find_single_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int i;
+ unsigned short utime;
+ int t_left;
+ int ret;
+ int done;
+
+ ret = -1;
+ utime = _qh->usecs;
+ t_left = utime;
+ i = 0;
+ done = 0;
+ while (done == 0) {
+ /* At the start _hcd->frame_usecs[i] = max_uframe_usecs[i]; */
+ if (utime <= _hcd->frame_usecs[i]) {
+ _hcd->frame_usecs[i] -= utime;
+ _qh->frame_usecs[i] += utime;
+ t_left -= utime;
+ ret = i;
+ done = 1;
+ return ret;
+ } else {
+ i++;
+ if (i == 8) {
+ done = 1;
+ ret = -1;
+ }
+ }
+ }
+ return ret;
+}
+
+/*
+ * use this for FS apps that can span multiple uframes
*/
-static int check_periodic_bandwidth(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+static int find_multi_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
{
- int status;
- uint16_t max_claimed_usecs;
- status = 0;
- if (_hcd->core_if->core_params->speed == DWC_SPEED_PARAM_HIGH) {
- /*
- * High speed mode.
- * Max periodic usecs is 80% x 125 usec = 100 usec.
- */
- max_claimed_usecs = 100 - _qh->usecs;
- } else {
+ int i;
+ int j;
+ unsigned short utime;
+ int t_left;
+ int ret;
+ int done;
+ unsigned short xtime;
+
+ ret = -1;
+ utime = _qh->usecs;
+ t_left = utime;
+ i = 0;
+ done = 0;
+loop:
+ while (done == 0) {
+ if(_hcd->frame_usecs[i] <= 0) {
+ i++;
+ if (i == 8) {
+ done = 1;
+ ret = -1;
+ }
+ goto loop;
+ }
+
/*
- * Full speed mode.
- * Max periodic usecs is 90% x 1000 usec = 900 usec.
+ * we need n consecutive slots
+ * so use j as a start slot; slots j and j+1 together must provide enough time (for now)
*/
- max_claimed_usecs = 900 - _qh->usecs;
+ xtime= _hcd->frame_usecs[i];
+ for (j = i+1 ; j < 8 ; j++ ) {
+ /*
+ * if we add this frame remaining time to xtime we may
+ * be OK, if not we need to test j for a complete frame
+ */
+ if ((xtime+_hcd->frame_usecs[j]) < utime) {
+ if (_hcd->frame_usecs[j] < max_uframe_usecs[j]) {
+ j = 8;
+ ret = -1;
+ continue;
+ }
+ }
+ if (xtime >= utime) {
+ ret = i;
+ j = 8; /* stop loop with a good value ret */
+ continue;
+ }
+ /* add this frame's remaining time to xtime */
+ xtime += _hcd->frame_usecs[j];
+ /* we must have a fully available next frame or break */
+ if ((xtime < utime)
+ && (_hcd->frame_usecs[j] == max_uframe_usecs[j])) {
+ ret = -1;
+ j = 8; /* stop loop with a bad value ret */
+ continue;
+ }
+ }
+ if (ret >= 0) {
+ t_left = utime;
+ for (j = i; (t_left>0) && (j < 8); j++ ) {
+ t_left -= _hcd->frame_usecs[j];
+ if ( t_left <= 0 ) {
+ _qh->frame_usecs[j] += _hcd->frame_usecs[j] + t_left;
+ _hcd->frame_usecs[j]= -t_left;
+ ret = i;
+ done = 1;
+ } else {
+ _qh->frame_usecs[j] += _hcd->frame_usecs[j];
+ _hcd->frame_usecs[j] = 0;
+ }
+ }
+ } else {
+ i++;
+ if (i == 8) {
+ done = 1;
+ ret = -1;
+ }
+ }
}
- if (_hcd->periodic_usecs > max_claimed_usecs) {
-#undef USB_DWC_OTG_IGNORE_BANDWIDTH
-#ifndef USB_DWC_OTG_IGNORE_BANDWIDTH
- DWC_NOTICE("%s: already claimed usecs %d, required usecs %d\n",
- __func__, _hcd->periodic_usecs, _qh->usecs);
- status = -ENOSPC;
-#else
- status = 0;
-#endif
+ return ret;
+}
+
+static int find_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int ret;
+ ret = -1;
+
+ if (_qh->speed == USB_SPEED_HIGH) {
+ /* if this is a hs transaction we need a full frame */
+ ret = find_single_uframe(_hcd, _qh);
+ } else {
+ /* if this is a fs transaction we may need a sequence of frames */
+ ret = find_multi_uframe(_hcd, _qh);
}
- return status;
+ return ret;
}
/**
@@ -335,13 +423,24 @@ static int check_max_xfer_size(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
static int schedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
{
int status = 0;
- status = periodic_channel_available(_hcd);
- if (status) {
- DWC_NOTICE("%s: No host channel available for periodic "
- "transfer.\n", __func__);
- return status;
+ int frame;
+ status = find_uframe(_hcd, _qh);
+ frame = -1;
+ if (status == 0) {
+ frame = 7;
+ } else {
+ if (status > 0 )
+ frame = status-1;
+ }
+ /* Set the new frame up */
+ if (frame > -1) {
+ _qh->sched_frame &= ~0x7;
+ _qh->sched_frame |= (frame & 7);
}
- status = check_periodic_bandwidth(_hcd, _qh);
+
+ if (status != -1 )
+ status = 0;
+
if (status) {
DWC_NOTICE("%s: Insufficient periodic bandwidth for "
"periodic transfer.\n", __func__);
@@ -357,9 +456,6 @@ static int schedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
/* Always start in the inactive schedule. */
list_add_tail(&_qh->qh_list_entry, &_hcd->periodic_sched_inactive);
- /* Reserve the periodic channel. */
- _hcd->periodic_channels++;
-
/* Update claimed usecs per (micro)frame. */
_hcd->periodic_usecs += _qh->usecs;
@@ -442,14 +538,17 @@ done:
*/
static void deschedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
{
+ int i;
list_del_init(&_qh->qh_list_entry);
- /* Release the periodic channel reservation. */
- _hcd->periodic_channels--;
-
/* Update claimed usecs per (micro)frame. */
_hcd->periodic_usecs -= _qh->usecs;
+ for (i = 0; i < 8; i++) {
+ _hcd->frame_usecs[i] += _qh->frame_usecs[i];
+ _qh->frame_usecs[i] = 0;
+ }
+
/* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */
hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_allocated -=
_qh->usecs / _qh->interval;
@@ -587,7 +686,7 @@ void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh,
* Remove from periodic_sched_queued and move to
* appropriate queue.
*/
- if (_qh->sched_frame == frame_number) {
+ if (dwc_frame_num_le(_qh->sched_frame, frame_number)) {
list_move(&_qh->qh_list_entry,
&_hcd->periodic_sched_ready);
} else {
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c.org b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c.org
new file mode 100644
index 00000000000..b33b32e8468
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c.org
@@ -0,0 +1,696 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_queue.c $
+ * $Revision: #4 $
+ * $Date: 2005/09/15 $
+ * $Change: 537387 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef CONFIG_DWC_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the functions to manage Queue Heads and Queue
+ * Transfer Descriptors.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+/**
+ * This function allocates and initializes a QH.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param[in] _urb Holds the information about the device/endpoint that we need
+ * to initialize the QH.
+ *
+ * @return Returns pointer to the newly allocated QH, or NULL on error. */
+dwc_otg_qh_t * dwc_otg_hcd_qh_create(dwc_otg_hcd_t * _hcd,
+ struct urb * _urb)
+{
+ dwc_otg_qh_t * qh;
+
+ /* Allocate memory */
+ /** @todo add memflags argument */
+ qh = dwc_otg_hcd_qh_alloc();
+ if (qh == NULL) {
+ return NULL;
+ }
+ dwc_otg_hcd_qh_init(_hcd, qh, _urb);
+ return qh;
+}
+
+/** Free each QTD in the QH's QTD-list then free the QH. QH should already be
+ * removed from a list. QTD list should already be empty if called from URB
+ * Dequeue.
+ *
+ * @param[in] _qh The QH to free.
+ */
+void dwc_otg_hcd_qh_free(dwc_otg_qh_t * _qh)
+{
+ dwc_otg_qtd_t * qtd;
+ struct list_head *pos;
+ unsigned long flags;
+
+ /* Free each QTD in the QTD list */
+ local_irq_save(flags);
+ for (pos = _qh->qtd_list.next; pos != &_qh->qtd_list;
+ pos = _qh->qtd_list.next) {
+ list_del(pos);
+ qtd = dwc_list_to_qtd(pos);
+ dwc_otg_hcd_qtd_free(qtd);
+ }
+ local_irq_restore(flags);
+ kfree(_qh);
+ return;
+}
+
+/** Initializes a QH structure.
+ *
+ * @param[in] _hcd The HCD state structure for the DWC OTG controller.
+ * @param[in] _qh The QH to init.
+ * @param[in] _urb Holds the information about the device/endpoint that we need
+ * to initialize the QH. */
+#define SCHEDULE_SLOP 10
+void dwc_otg_hcd_qh_init(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh,
+ struct urb *_urb)
+{
+ memset(_qh, 0, sizeof(dwc_otg_qh_t));
+
+ /* Initialize QH */
+ switch (usb_pipetype(_urb->pipe)) {
+ case PIPE_CONTROL:
+ _qh->ep_type = USB_ENDPOINT_XFER_CONTROL;
+ break;
+ case PIPE_BULK:
+ _qh->ep_type = USB_ENDPOINT_XFER_BULK;
+ break;
+ case PIPE_ISOCHRONOUS:
+ _qh->ep_type = USB_ENDPOINT_XFER_ISOC;
+ break;
+ case PIPE_INTERRUPT:
+ _qh->ep_type = USB_ENDPOINT_XFER_INT;
+ break;
+ }
+ _qh->ep_is_in = usb_pipein(_urb->pipe) ? 1 : 0;
+ _qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+ _qh->maxp = usb_maxpacket(_urb->dev, _urb->pipe, !(usb_pipein(_urb->pipe)));
+ INIT_LIST_HEAD(&_qh->qtd_list);
+ INIT_LIST_HEAD(&_qh->qh_list_entry);
+ _qh->channel = NULL;
+
+ /* FS/LS Enpoint on HS Hub
+ * NOT virtual root hub */
+ _qh->do_split = 0;
+ if (((_urb->dev->speed == USB_SPEED_LOW) ||
+ (_urb->dev->speed == USB_SPEED_FULL)) &&
+ (_urb->dev->tt) && (_urb->dev->tt->hub) && (_urb->dev->tt->hub->devnum != 1)) {
+ DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n",
+ usb_pipeendpoint(_urb->pipe), _urb->dev->tt->hub->devnum, _urb->dev->ttport);
+ _qh->do_split = 1;
+ }
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT
+ || _qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
+
+ /* Compute scheduling parameters once and save them. */
+ hprt0_data_t hprt;
+
+ /** @todo Account for split transfers in the bus time. */
+ int bytecount = dwc_hb_mult(_qh->maxp) * dwc_max_packet(_qh->maxp);
+ _qh->usecs = NS_TO_US(usb_calc_bus_time(_urb->dev->speed,
+ usb_pipein(_urb->pipe),
+ (_qh->ep_type == USB_ENDPOINT_XFER_ISOC),bytecount));
+
+ /* Start in a slightly future (micro)frame. */
+ _qh->sched_frame = dwc_frame_num_inc(_hcd->frame_number, SCHEDULE_SLOP);
+ _qh->interval = _urb->interval;
+
+#if 0
+ /* Increase interrupt polling rate for debugging. */
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ _qh->interval = 8;
+ }
+
+#endif /* */
+ hprt.d32 = dwc_read_reg32(_hcd->core_if->host_if->hprt0);
+ if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) &&
+ ((_urb->dev->speed == USB_SPEED_LOW) ||
+ (_urb->dev->speed == USB_SPEED_FULL))) {
+ _qh->interval *= 8;
+ _qh->sched_frame |= 0x7;
+ _qh->start_split_frame = _qh->sched_frame;
+ }
+ }
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n");
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", _qh);
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n",
+ _urb->dev->devnum);
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n",
+ usb_pipeendpoint(_urb->pipe),
+ usb_pipein(_urb->pipe) == USB_DIR_IN ? "IN" : "OUT");
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", ( {
+ char *speed;
+ switch(_urb->dev->speed) {
+ case USB_SPEED_LOW:
+ speed = "low"; break;
+ case USB_SPEED_FULL:
+ speed = "full"; break;
+ case USB_SPEED_HIGH:
+ speed = "high"; break;
+ default:
+ speed = "?";
+ break;
+ };
+ speed;
+ } )) ;
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n", ( {
+ char *type;
+ switch (_qh->ep_type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ type = "isochronous"; break;
+ case USB_ENDPOINT_XFER_INT:
+ type = "interrupt"; break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ type = "control"; break;
+ case USB_ENDPOINT_XFER_BULK:
+ type = "bulk"; break;
+ default:
+ type = "?";break;
+ };
+ type;
+ } )) ;
+
+#ifdef CONFIG_DWC_DEBUG
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n",
+ _qh->usecs);
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n",
+ _qh->interval);
+ }
+
+#endif /* */
+ return;
+}
+
+/**
+ * Checks that a channel is available for a periodic transfer.
+ *
+ * @return 0 if successful, negative error code otherise.
+ */
+static int periodic_channel_available(dwc_otg_hcd_t * _hcd)
+{
+ /*
+ * Currently assuming that there is a dedicated host channnel for each
+ * periodic transaction plus at least one host channel for
+ * non-periodic transactions.
+ */
+ int status;
+ int num_channels;
+ num_channels = _hcd->core_if->core_params->host_channels;
+ if ((_hcd->periodic_channels + _hcd->non_periodic_channels <
+ num_channels) && (_hcd->periodic_channels < num_channels - 1)) {
+ status = 0;
+ } else {
+ DWC_NOTICE("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
+ __func__, num_channels, _hcd->periodic_channels,
+ _hcd->non_periodic_channels);
+ status = -ENOSPC;
+ }
+ return status;
+}
+
+/**
+ * Checks that there is sufficient bandwidth for the specified QH in the
+ * periodic schedule. For simplicity, this calculation assumes that all the
+ * transfers in the periodic schedule may occur in the same (micro)frame.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param _qh QH containing periodic bandwidth required.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int check_periodic_bandwidth(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int status;
+ uint16_t max_claimed_usecs;
+ status = 0;
+ if (_hcd->core_if->core_params->speed == DWC_SPEED_PARAM_HIGH) {
+ /*
+ * High speed mode.
+ * Max periodic usecs is 80% x 125 usec = 100 usec.
+ */
+ max_claimed_usecs = 100 - _qh->usecs;
+ } else {
+ /*
+ * Full speed mode.
+ * Max periodic usecs is 90% x 1000 usec = 900 usec.
+ */
+ max_claimed_usecs = 900 - _qh->usecs;
+ }
+ if (_hcd->periodic_usecs > max_claimed_usecs) {
+#undef USB_DWC_OTG_IGNORE_BANDWIDTH
+#ifndef USB_DWC_OTG_IGNORE_BANDWIDTH
+ DWC_NOTICE("%s: already claimed usecs %d, required usecs %d\n",
+ __func__, _hcd->periodic_usecs, _qh->usecs);
+ status = -ENOSPC;
+#else
+ status = 0;
+#endif
+ }
+ return status;
+}
+
+/**
+ * Checks that the max transfer size allowed in a host channel is large enough
+ * to handle the maximum data transfer in a single (micro)frame for a periodic
+ * transfer.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param _qh QH for a periodic endpoint.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int check_max_xfer_size(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int status;
+ uint32_t max_xfer_size;
+ uint32_t max_channel_xfer_size;
+ status = 0;
+ max_xfer_size = dwc_max_packet(_qh->maxp) * dwc_hb_mult(_qh->maxp);
+ max_channel_xfer_size = _hcd->core_if->core_params->max_transfer_size;
+ if (max_xfer_size > max_channel_xfer_size) {
+ DWC_NOTICE("%s: Periodic xfer length %d > "
+ "max xfer length for channel %d\n", __func__,
+ max_xfer_size, max_channel_xfer_size);
+ status = -ENOSPC;
+ }
+ return status;
+}
+
+/**
+ * Schedules an interrupt or isochronous transfer in the periodic schedule.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param _qh QH for the periodic transfer. The QH should already contain the
+ * scheduling information.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int schedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int status = 0;
+ status = periodic_channel_available(_hcd);
+ if (status) {
+ DWC_NOTICE("%s: No host channel available for periodic "
+ "transfer.\n", __func__);
+ return status;
+ }
+ status = check_periodic_bandwidth(_hcd, _qh);
+ if (status) {
+ DWC_NOTICE("%s: Insufficient periodic bandwidth for "
+ "periodic transfer.\n", __func__);
+ return status;
+ }
+ status = check_max_xfer_size(_hcd, _qh);
+ if (status) {
+ DWC_NOTICE("%s: Channel max transfer size too small "
+ "for periodic transfer.\n", __func__);
+ return status;
+ }
+
+ /* Always start in the inactive schedule. */
+ list_add_tail(&_qh->qh_list_entry, &_hcd->periodic_sched_inactive);
+
+ /* Reserve the periodic channel. */
+ _hcd->periodic_channels++;
+
+ /* Update claimed usecs per (micro)frame. */
+ _hcd->periodic_usecs += _qh->usecs;
+
+ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_allocated +=
+ _qh->usecs / _qh->interval;
+
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_int_reqs++;
+ DWC_DEBUGPL(DBG_HCD,
+ "Scheduled intr: qh %p, usecs %d, period %d\n",
+ _qh, _qh->usecs, _qh->interval);
+ } else {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_isoc_reqs++;
+ DWC_DEBUGPL(DBG_HCD,
+ "Scheduled isoc: qh %p, usecs %d, period %d\n",
+ _qh, _qh->usecs, _qh->interval);
+ }
+ return status;
+}
+
+/**
+ * This function adds a QH to either the non periodic or periodic schedule if
+ * it is not already in the schedule. If the QH is already in the schedule, no
+ * action is taken.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qh_add(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ unsigned long flags;
+ int status = 0;
+ local_irq_save(flags);
+ if (!list_empty(&_qh->qh_list_entry)) {
+ /* QH already in a schedule. */
+ goto done;
+ }
+
+ /* Add the new QH to the appropriate schedule */
+ if (dwc_qh_is_non_per(_qh)) {
+ /* Always start in the inactive schedule. */
+ list_add_tail(&_qh->qh_list_entry,
+ &_hcd->non_periodic_sched_inactive);
+ } else {
+ status = schedule_periodic(_hcd, _qh);
+ }
+
+done:local_irq_restore(flags);
+ return status;
+}
+/**
+ * This function adds a QH to the non periodic deferred schedule.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qh_add_deferred(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (!list_empty(&_qh->qh_list_entry)) {
+ /* QH already in a schedule. */
+ goto done;
+ }
+
+ /* Add the new QH to the non periodic deferred schedule */
+ if (dwc_qh_is_non_per(_qh)) {
+ list_add_tail(&_qh->qh_list_entry,
+ &_hcd->non_periodic_sched_deferred);
+ }
+done:
+ local_irq_restore(flags);
+ return 0;
+}
+
+/**
+ * Removes an interrupt or isochronous transfer from the periodic schedule.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param _qh QH for the periodic transfer.
+ */
+static void deschedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ list_del_init(&_qh->qh_list_entry);
+
+ /* Release the periodic channel reservation. */
+ _hcd->periodic_channels--;
+
+ /* Update claimed usecs per (micro)frame. */
+ _hcd->periodic_usecs -= _qh->usecs;
+
+ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_allocated -=
+ _qh->usecs / _qh->interval;
+
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_int_reqs--;
+ DWC_DEBUGPL(DBG_HCD,
+ "Descheduled intr: qh %p, usecs %d, period %d\n",
+ _qh, _qh->usecs, _qh->interval);
+ } else {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_isoc_reqs--;
+ DWC_DEBUGPL(DBG_HCD,
+ "Descheduled isoc: qh %p, usecs %d, period %d\n",
+ _qh, _qh->usecs, _qh->interval);
+ }
+}
+
+/**
+ * Removes a QH from either the non-periodic or periodic schedule. Memory is
+ * not freed.
+ *
+ * @param[in] _hcd The HCD state structure.
+ * @param[in] _qh QH to remove from schedule. */
+void dwc_otg_hcd_qh_remove(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (list_empty(&_qh->qh_list_entry)) {
+ /* QH is not in a schedule. */
+ goto done;
+ }
+ if (dwc_qh_is_non_per(_qh)) {
+ if (_hcd->non_periodic_qh_ptr == &_qh->qh_list_entry) {
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ }
+ list_del_init(&_qh->qh_list_entry);
+ } else {
+ deschedule_periodic(_hcd, _qh);
+ }
+
+done:local_irq_restore(flags);
+}
+
+/**
+ * Defers a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the deferred non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ */
+int dwc_otg_hcd_qh_deferr(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh, int delay)
+{
+ int deact = 1;
+ unsigned long flags;
+ local_irq_save(flags);
+ if (dwc_qh_is_non_per(_qh)) {
+ _qh->sched_frame =
+ dwc_frame_num_inc(_hcd->frame_number,
+ delay);
+ _qh->channel = NULL;
+ _qh->qtd_in_process = NULL;
+ deact = 0;
+ dwc_otg_hcd_qh_remove(_hcd, _qh);
+ if (!list_empty(&_qh->qtd_list)) {
+ /* Add back to deferred non-periodic schedule. */
+ dwc_otg_hcd_qh_add_deferred(_hcd, _qh);
+ }
+ }
+ local_irq_restore(flags);
+ return deact;
+}
+/**
+ * Deactivates a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the inactive non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ *
+ * For periodic QHs, the QH is removed from the periodic queued schedule. If
+ * there are any QTDs still attached to the QH, the QH is added to either the
+ * periodic inactive schedule or the periodic ready schedule and its next
+ * scheduled frame is calculated. The QH is placed in the ready schedule if
+ * the scheduled frame has been reached already. Otherwise it's placed in the
+ * inactive schedule. If there are no QTDs attached to the QH, the QH is
+ * completely removed from the periodic schedule.
+ */
+void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh,
+ int sched_next_periodic_split)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (dwc_qh_is_non_per(_qh)) {
+ dwc_otg_hcd_qh_remove(_hcd, _qh);
+ if (!list_empty(&_qh->qtd_list)) {
+ /* Add back to inactive non-periodic schedule. */
+ dwc_otg_hcd_qh_add(_hcd, _qh);
+ }
+ } else {
+ uint16_t frame_number =
+ dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd));
+ if (_qh->do_split) {
+ /* Schedule the next continuing periodic split transfer */
+ if (sched_next_periodic_split) {
+ _qh->sched_frame = frame_number;
+ if (dwc_frame_num_le(frame_number,
+ dwc_frame_num_inc(_qh->start_split_frame,1))) {
+ /*
+ * Allow one frame to elapse after start
+ * split microframe before scheduling
+ * complete split, but DONT if we are
+ * doing the next start split in the
+ * same frame for an ISOC out.
+ */
+ if ((_qh->ep_type != USB_ENDPOINT_XFER_ISOC)
+ || (_qh->ep_is_in != 0)) {
+ _qh->sched_frame = dwc_frame_num_inc(_qh->sched_frame,1);
+ }
+ }
+ } else {
+ _qh->sched_frame = dwc_frame_num_inc(_qh->start_split_frame,
+ _qh->interval);
+ if (dwc_frame_num_le(_qh->sched_frame, frame_number)) {
+ _qh->sched_frame = frame_number;
+ }
+ _qh->sched_frame |= 0x7;
+ _qh->start_split_frame = _qh->sched_frame;
+ }
+ } else {
+ _qh->sched_frame =
+ dwc_frame_num_inc(_qh->sched_frame, _qh->interval);
+ if (dwc_frame_num_le(_qh->sched_frame, frame_number)) {
+ _qh->sched_frame = frame_number;
+ }
+ }
+ if (list_empty(&_qh->qtd_list)) {
+ dwc_otg_hcd_qh_remove(_hcd, _qh);
+ } else {
+ /*
+ * Remove from periodic_sched_queued and move to
+ * appropriate queue.
+ */
+ if (_qh->sched_frame == frame_number) {
+ list_move(&_qh->qh_list_entry,
+ &_hcd->periodic_sched_ready);
+ } else {
+ list_move(&_qh->qh_list_entry,
+ &_hcd->periodic_sched_inactive);
+ }
+ }
+ }
+ local_irq_restore(flags);
+}
+
+/**
+ * This function allocates and initializes a QTD.
+ *
+ * @param[in] _urb The URB to create a QTD from. Each URB-QTD pair will end up
+ * pointing to each other so each pair should have a unique correlation.
+ *
+ * @return Returns pointer to the newly allocated QTD, or NULL on error. */
+dwc_otg_qtd_t * dwc_otg_hcd_qtd_create(struct urb *_urb)
+{
+ dwc_otg_qtd_t * qtd;
+ qtd = dwc_otg_hcd_qtd_alloc();
+ if (qtd == NULL) {
+ return NULL;
+ }
+ dwc_otg_hcd_qtd_init(qtd, _urb);
+ return qtd;
+}
+
+/**
+ * Initializes a QTD structure.
+ *
+ * @param[in] _qtd The QTD to initialize.
+ * @param[in] _urb The URB to use for initialization. */
+void dwc_otg_hcd_qtd_init(dwc_otg_qtd_t * _qtd, struct urb *_urb)
+{
+ memset(_qtd, 0, sizeof(dwc_otg_qtd_t));
+ _qtd->urb = _urb;
+ if (usb_pipecontrol(_urb->pipe)) {
+ /*
+ * The only time the QTD data toggle is used is on the data
+ * phase of control transfers. This phase always starts with
+ * DATA1.
+ */
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
+ _qtd->control_phase = DWC_OTG_CONTROL_SETUP;
+ }
+
+ /* start split */
+ _qtd->complete_split = 0;
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
+ _qtd->isoc_split_offset = 0;
+
+ /* Store the qtd ptr in the urb to reference what QTD. */
+ _urb->hcpriv = _qtd;
+ return;
+}
+
+/**
+ * This function adds a QTD to the QTD-list of a QH. It will find the correct
+ * QH to place the QTD into. If it does not find a QH, then it will create a
+ * new QH. If the QH to which the QTD is added is not currently scheduled, it
+ * is placed into the proper schedule based on its EP type.
+ *
+ * @param[in] _qtd The QTD to add
+ * @param[in] _dwc_otg_hcd The DWC HCD structure
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t * _qtd, dwc_otg_hcd_t * _dwc_otg_hcd)
+{
+ struct usb_host_endpoint *ep;
+ dwc_otg_qh_t * qh;
+ unsigned long flags;
+ int retval = 0;
+ struct urb *urb = _qtd->urb;
+ local_irq_save(flags);
+
+ /*
+ * Get the QH which holds the QTD-list to insert to. Create QH if it
+ * doesn't exist.
+ */
+ ep = dwc_urb_to_endpoint(urb);
+ qh = (dwc_otg_qh_t *) ep->hcpriv;
+ if (qh == NULL) {
+ qh = dwc_otg_hcd_qh_create(_dwc_otg_hcd, urb);
+ if (qh == NULL) {
+ retval = -1;
+ goto done;
+ }
+ ep->hcpriv = qh;
+ }
+ _qtd->qtd_qh_ptr = qh;
+ retval = dwc_otg_hcd_qh_add(_dwc_otg_hcd, qh);
+ if (retval == 0) {
+ list_add_tail(&_qtd->qtd_list_entry, &qh->qtd_list);
+ }
+
+done:
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+
+#endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c
index 4bd17de7939..ba4435385e5 100644
--- a/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c
@@ -1331,6 +1331,9 @@ void dwc_otg_pcd_remove(struct device *_dev)
int usb_gadget_register_driver(struct usb_gadget_driver *_driver)
{
int retval;
+ dctl_data_t dctl;
+ uint32_t *addr = NULL;
+
DWC_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n",
_driver->driver.name);
if (!_driver || _driver->speed == USB_SPEED_UNKNOWN || !_driver->bind
@@ -1376,6 +1379,16 @@ int usb_gadget_register_driver(struct usb_gadget_driver *_driver)
}
DWC_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n",
_driver->driver.name);
+
+ /* do soft-disconnect */
+ addr = (uint32_t *)&(GET_CORE_IF(s_pcd)->dev_if->dev_global_regs->dctl);
+ dctl.d32 = dwc_read_reg32(addr);
+ dctl.b.sftdiscon = 1;
+ dwc_write_reg32(addr, dctl.d32);
+ msleep(2000);
+ dctl.b.sftdiscon = 0;
+ dwc_write_reg32(addr, dctl.d32);
+
return 0;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c.org b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c.org
new file mode 100644
index 00000000000..4bd17de7939
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c.org
@@ -0,0 +1,1408 @@
+ /* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_pcd.c $
+ * $Revision: #18 $
+ * $Date: 2007/02/07 $
+ * $Change: 791271 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef CONFIG_DWC_HOST_ONLY
+
+/** @file
+ * This file implements the Peripheral Controller Driver.
+ *
+ * The Peripheral Controller Driver (PCD) is responsible for
+ * translating requests from the Function Driver into the appropriate
+ * actions on the DWC_otg controller. It isolates the Function Driver
+ * from the specifics of the controller by providing an API to the
+ * Function Driver.
+ *
+ * The Peripheral Controller Driver for Linux will implement the
+ * Gadget API, so that the existing Gadget drivers can be used.
+ * (Gadget Driver is the Linux terminology for a Function Driver.)
+ *
+ * The Linux Gadget API is defined in the header file
+ * <code><linux/usb/gadget.h></code>. The USB EP operations API is
+ * defined in the structure <code>usb_ep_ops</code> and the USB
+ * Controller API is defined in the structure
+ * <code>usb_gadget_ops</code>.
+ *
+ * An important function of the PCD is managing interrupts generated
+ * by the DWC_otg controller. The implementation of the DWC_otg device
+ * mode interrupt service routines is in dwc_otg_pcd_intr.c.
+ *
+ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
+ * @todo Does it work when the request size is greater than DEPTSIZ
+ * transfer size
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_pcd.h"
+
+/**
+ * Static PCD pointer for use in usb_gadget_register_driver and
+ * usb_gadget_unregister_driver. Initialized in dwc_otg_pcd_init.
+ */
+static dwc_otg_pcd_t *s_pcd = 0;
+
+/* Display the contents of the buffer */
+extern void dump_msg(const u8 * buf, unsigned int length);
+
+/**
+ * This function completes a request. It call's the request call back.
+ */
+void request_done(dwc_otg_pcd_ep_t * _ep, dwc_otg_pcd_request_t * _req,
+ int _status)
+{
+ unsigned stopped = _ep->stopped;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ep);
+ if (_req->mapped) {
+ dma_unmap_single(_ep->pcd->gadget.dev.parent,
+ _req->req.dma, _req->req.length,
+ _ep->dwc_ep.is_in
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ _req->req.dma = DMA_ADDR_INVALID;
+ _req->mapped = 0;
+ } else
+ dma_sync_single_for_cpu(_ep->pcd->gadget.dev.parent,
+ _req->req.dma, _req->req.length,
+ _ep->dwc_ep.is_in
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+
+ list_del_init(&_req->queue);
+ if (_req->req.status == -EINPROGRESS) {
+ _req->req.status = _status;
+ } else {
+ _status = _req->req.status;
+ }
+
+ /* don't modify queue heads during completion callback */
+ _ep->stopped = 1;
+ SPIN_UNLOCK(&_ep->pcd->lock);
+ _req->req.complete(&_ep->ep, &_req->req);
+ SPIN_LOCK(&_ep->pcd->lock);
+ if (_ep->pcd->request_pending > 0) {
+ --_ep->pcd->request_pending;
+ }
+ _ep->stopped = stopped;
+
+#ifdef CONFIG_405EZ
+ /*
+ * Added-sr: 2007-07-26
+ *
+ * Finally, when the current request is done, mark this endpoint
+ * as not active, so that new requests can be processed.
+ */
+ _ep->dwc_ep.active = 0;
+#endif
+}
+
+/**
+ * This function terminates all the requsts in the EP request queue.
+ */
+void request_nuke(dwc_otg_pcd_ep_t * _ep)
+{
+ dwc_otg_pcd_request_t * req;
+ _ep->stopped = 1;
+ /* called with irqs blocked?? */
+ while (!list_empty(&_ep->queue)) {
+ req = list_entry(_ep->queue.next, dwc_otg_pcd_request_t, queue);
+ request_done(_ep, req, -ESHUTDOWN);
+ }
+}
+
+/* USB Endpoint Operations */
+/*
+ * The following sections briefly describe the behavior of the Gadget
+ * API endpoint operations implemented in the DWC_otg driver
+ * software. Detailed descriptions of the generic behavior of each of
+ * these functions can be found in the Linux header file
+ * include/linux/usb_gadget.h.
+ *
+ * The Gadget API provides wrapper functions for each of the function
+ * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper
+ * function, which then calls the underlying PCD function. The
+ * following sections are named according to the wrapper
+ * functions. Within each section, the corresponding DWC_otg PCD
+ * function name is specified.
+ *
+ */
+
+/**
+ * This function assigns periodic Tx FIFO to an periodic EP
+ * in shared Tx FIFO mode
+ */
+static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t * core_if)
+{
+ uint32_t PerTxMsk = 1;
+ int i;
+ for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
+ if ((PerTxMsk & core_if->p_tx_msk) == 0) {
+ core_if->p_tx_msk |= PerTxMsk;
+ return i + 1;
+ }
+ PerTxMsk <<= 1;
+ }
+ return 0;
+}
+
+/**
+ * This function releases periodic Tx FIFO
+ * in shared Tx FIFO mode
+ */
+static void release_perio_tx_fifo(dwc_otg_core_if_t * core_if,
+ uint32_t fifo_num)
+{
+ core_if->p_tx_msk = (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
+}
+
+/**
+ * This function assigns periodic Tx FIFO to an periodic EP
+ * in shared Tx FIFO mode
+ */
+static uint32_t assign_tx_fifo(dwc_otg_core_if_t * core_if)
+{
+ uint32_t TxMsk = 1;
+ int i;
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
+ if ((TxMsk & core_if->tx_msk) == 0) {
+ core_if->tx_msk |= TxMsk;
+ return i + 1;
+ }
+ TxMsk <<= 1;
+ }
+ return 0;
+}
+
+/**
+ * This function releases periodic Tx FIFO
+ * in shared Tx FIFO mode
+ */
+static void release_tx_fifo(dwc_otg_core_if_t * core_if, uint32_t fifo_num)
+{
+ core_if->tx_msk = (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
+}
+
+/**
+ * This function is called by the Gadget Driver for each EP to be
+ * configured for the current configuration (SET_CONFIGURATION).
+ *
+ * This function initializes the dwc_otg_ep_t data structure, and then
+ * calls dwc_otg_ep_activate.
+ */
+static int dwc_otg_pcd_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *_desc)
+{
+ dwc_otg_pcd_ep_t * ep = 0;
+ dwc_otg_pcd_t * pcd = 0;
+ unsigned long flags;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ep, _desc);
+ ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
+ if (!_ep || !_desc || ep->desc
+ || _desc->bDescriptorType != USB_DT_ENDPOINT) {
+ DWC_WARN("%s, bad ep or descriptor\n", __func__);
+ return -EINVAL;
+ }
+ if (ep == &ep->pcd->ep0) {
+ DWC_WARN("%s, bad ep(0)\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check FIFO size? */
+ if (!_desc->wMaxPacketSize) {
+ DWC_WARN("%s, bad %s maxpacket\n", __func__, _ep->name);
+ return -ERANGE;
+ }
+ pcd = ep->pcd;
+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
+ DWC_WARN("%s, bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
+ ep->desc = _desc;
+ ep->ep.maxpacket = le16_to_cpu(_desc->wMaxPacketSize);
+
+ /*
+ * Activate the EP
+ */
+ ep->stopped = 0;
+ ep->dwc_ep.is_in = (USB_DIR_IN & _desc->bEndpointAddress) != 0;
+ ep->dwc_ep.maxpacket = ep->ep.maxpacket;
+ ep->dwc_ep.type = _desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ if (ep->dwc_ep.is_in) {
+ if (!pcd->otg_dev->core_if->en_multiple_tx_fifo) {
+ ep->dwc_ep.tx_fifo_num = 0;
+ if ((_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_ISOC) {
+ /*
+ * if ISOC EP then assign a Periodic Tx FIFO.
+ */
+ ep->dwc_ep.tx_fifo_num = assign_perio_tx_fifo(pcd->otg_dev->core_if);
+ }
+ } else {
+ /*
+ * if Dedicated FIFOs mode is on then assign a Tx FIFO.
+ */
+ ep->dwc_ep.tx_fifo_num = assign_tx_fifo(pcd->otg_dev->core_if);
+ }
+ }
+
+ /* Set initial data PID. */
+ if ((_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK) {
+ ep->dwc_ep.data_pid_start = 0;
+ }
+ DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n",
+ ep->ep.name, (ep->dwc_ep.is_in ? "IN" : "OUT"),
+ ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
+ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return 0;
+}
+
+
+/**
+ * This function is called when an EP is disabled due to disconnect or
+ * change in configuration. Any pending requests will terminate with a
+ * status of -ESHUTDOWN.
+ *
+ * This function modifies the dwc_otg_ep_t data structure for this EP,
+ * and then calls dwc_otg_ep_deactivate.
+ */
+static int dwc_otg_pcd_ep_disable(struct usb_ep *_ep)
+{
+ dwc_otg_pcd_ep_t * ep;
+ unsigned long flags;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ep);
+ ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
+ if (!_ep || !ep->desc) {
+ DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__,
+ _ep ? ep->ep.name : NULL);
+ return -EINVAL;
+ }
+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
+ request_nuke(ep);
+ dwc_otg_ep_deactivate(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
+ ep->desc = 0;
+ ep->stopped = 1;
+ if (ep->dwc_ep.is_in) {
+ release_perio_tx_fifo(GET_CORE_IF(ep->pcd),ep->dwc_ep.tx_fifo_num);
+ release_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num);
+ }
+ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags);
+ DWC_DEBUGPL(DBG_PCD, "%s disabled\n", _ep->name);
+ return 0;
+}
+
+
+/**
+ * This function allocates a request object to use with the specified
+ * endpoint.
+ *
+ * @param _ep The endpoint to be used with with the request
+ * @param _gfp_flags the GFP_* flags to use.
+ */
+static struct usb_request *dwc_otg_pcd_alloc_request(struct usb_ep *_ep,
+ gfp_t _gfp_flags)
+{
+ dwc_otg_pcd_request_t * req;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p,%d)\n", __func__, _ep, _gfp_flags);
+ if (0 == _ep) {
+ DWC_WARN("%s() %s\n", __func__, "Invalid EP!\n");
+ return 0;
+ }
+ req = kmalloc(sizeof(dwc_otg_pcd_request_t), _gfp_flags);
+ if (0 == req) {
+ DWC_WARN("%s() %s\n", __func__,"request allocation failed!\n");
+ return 0;
+ }
+ memset(req, 0, sizeof(dwc_otg_pcd_request_t));
+ req->req.dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&req->queue);
+ return &req->req;
+}
+
+
+/**
+ * This function frees a request object.
+ *
+ * @param _ep The endpoint associated with the request
+ * @param _req The request being freed
+ */
+static void dwc_otg_pcd_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ dwc_otg_pcd_request_t * req;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ep, _req);
+ if (0 == _ep || 0 == _req) {
+ DWC_WARN("%s() %s\n", __func__,"Invalid ep or req argument!\n");
+ return;
+ }
+ req = container_of(_req, dwc_otg_pcd_request_t, req);
+ kfree(req);
+}
+
+
+/**
+ * This function is used to submit an I/O Request to an EP.
+ *
+ * - When the request completes the request's completion callback
+ * is called to return the request to the driver.
+ * - An EP, except control EPs, may have multiple requests
+ * pending.
+ * - Once submitted the request cannot be examined or modified.
+ * - Each request is turned into one or more packets.
+ * - A BULK EP can queue any amount of data; the transfer is
+ * packetized.
+ * - Zero length Packets are specified with the request 'zero'
+ * flag.
+ */
+static int dwc_otg_pcd_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t _gfp_flags)
+{
+ int prevented = 0;
+ dwc_otg_pcd_request_t * req;
+ dwc_otg_pcd_ep_t * ep;
+ dwc_otg_pcd_t * pcd;
+ unsigned long flags = 0;
+
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p,%d)\n", __func__, _ep, _req,
+ _gfp_flags);
+ req = container_of(_req, dwc_otg_pcd_request_t, req);
+ if (!_req || !_req->complete || !_req->buf
+ || !list_empty(&req->queue)) {
+ DWC_WARN("%s, bad params\n", __func__);
+ return -EINVAL;
+ }
+ ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
+ if (!_ep || (!ep->desc && ep->dwc_ep.num != 0)) {
+ DWC_WARN("%s, bad ep\n", __func__);
+ return -EINVAL;
+ }
+ pcd = ep->pcd;
+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
+ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed);
+ DWC_WARN("%s, bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+ DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", _ep->name,
+ _req, _req->length, _req->buf);
+ if (!GET_CORE_IF(pcd)->core_params->opt) {
+ if (ep->dwc_ep.num != 0) {
+ DWC_ERROR("%s queue req %p, len %d buf %p\n",
+ _ep->name, _req, _req->length, _req->buf);
+ }
+ }
+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
+
+#if defined(CONFIG_DWC_DEBUG) & defined(VERBOSE)
+ dump_msg(_req->buf, _req->length);
+
+#endif /* */
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ /*
+ * For EP0 IN without premature status, zlp is required?
+ */
+ if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
+ DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", _ep->name);
+ //_req->zero = 1;
+ }
+
+ /* map virtual address to hardware */
+ if (_req->dma == DMA_ADDR_INVALID) {
+ _req->dma = dma_map_single(ep->pcd->gadget.dev.parent,
+ _req->buf,
+ _req->length,
+ ep->dwc_ep.is_in
+ ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ req->mapped = 1;
+ } else {
+ dma_sync_single_for_device(ep->pcd->gadget.dev.parent,
+ _req->dma, _req->length,
+ ep->dwc_ep.is_in
+ ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ req->mapped = 0;
+ }
+
+ /* Start the transfer */
+ if (list_empty(&ep->queue) && !ep->stopped) {
+ /* EP0 Transfer? */
+ if (ep->dwc_ep.num == 0) {
+ switch (pcd->ep0state) {
+ case EP0_IN_DATA_PHASE:
+ DWC_DEBUGPL(DBG_PCD, "%s ep0: EP0_IN_DATA_PHASE\n",
+ __func__);
+ break;
+ case EP0_OUT_DATA_PHASE:
+ DWC_DEBUGPL(DBG_PCD, "%s ep0: EP0_OUT_DATA_PHASE\n",
+ __func__);
+ if (pcd->request_config) {
+ /* Complete STATUS PHASE */
+ ep->dwc_ep.is_in = 1;
+ pcd->ep0state = EP0_STATUS;
+ }
+ break;
+ default:
+ DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
+ pcd->ep0state);
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return -EL2HLT;
+ }
+ ep->dwc_ep.dma_addr = _req->dma;
+ ep->dwc_ep.start_xfer_buff = _req->buf;
+ ep->dwc_ep.xfer_buff = _req->buf;
+ ep->dwc_ep.xfer_len = _req->length;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
+ &ep->dwc_ep);
+ } else {
+ /* Setup and start the Transfer */
+ ep->dwc_ep.dma_addr = _req->dma;
+ ep->dwc_ep.start_xfer_buff = _req->buf;
+ ep->dwc_ep.xfer_buff = _req->buf;
+ ep->dwc_ep.xfer_len = _req->length;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
+ dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
+ &ep->dwc_ep);
+ }
+ }
+ if ((req != 0) || prevented) {
+ ++pcd->request_pending;
+ list_add_tail(&req->queue, &ep->queue);
+ if (ep->dwc_ep.is_in && ep->stopped
+ && !(GET_CORE_IF(pcd)->dma_enable)) {
+ /** @todo NGS Create a function for this. */
+ diepmsk_data_t diepmsk = {.d32 = 0};
+ diepmsk.b.intktxfemp = 1;
+ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0,
+ diepmsk.d32);
+ }
+ }
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return 0;
+}
+
+/**
+ * This function cancels an I/O request from an EP.
+ */
+static int dwc_otg_pcd_ep_dequeue(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ dwc_otg_pcd_request_t * req;
+ dwc_otg_pcd_ep_t * ep;
+ dwc_otg_pcd_t * pcd;
+ unsigned long flags;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ep, _req);
+ ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
+ if (!_ep || !_req || (!ep->desc && ep->dwc_ep.num != 0)) {
+ DWC_WARN("%s, bad argument\n", __func__);
+ return -EINVAL;
+ }
+ pcd = ep->pcd;
+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
+ DWC_WARN("%s, bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
+ DWC_DEBUGPL(DBG_PCDV, "%s %s %s %p\n", __func__, _ep->name,
+ ep->dwc_ep.is_in ? "IN" : "OUT", _req);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req) {
+ break;
+ }
+ }
+ if (&req->req != _req) {
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return -EINVAL;
+ }
+ if (!list_empty(&req->queue)) {
+ request_done(ep, req, -ECONNRESET);
+ } else {
+ req = 0;
+ }
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return req ? 0 : -EOPNOTSUPP;
+}
+
+/**
+ * usb_ep_set_halt stalls an endpoint.
+ *
+ * usb_ep_clear_halt clears an endpoint halt and resets its data
+ * toggle.
+ *
+ * Both of these functions are implemented with the same underlying
+ * function. The behavior depends on the value argument.
+ *
+ * @param[in] _ep the Endpoint to halt or clear halt.
+ * @param[in] _value
+ * - 0 means clear_halt.
+ * - 1 means set_halt,
+ * - 2 means clear stall lock flag.
+ * - 3 means set stall lock flag.
+ */
+static int dwc_otg_pcd_ep_set_halt(struct usb_ep *_ep, int _value)
+{
+ int retval = 0;
+ unsigned long flags;
+ dwc_otg_pcd_ep_t * ep = 0;
+ DWC_DEBUGPL(DBG_PCD, "HALT %s %d\n", _ep->name, _value);
+ ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
+ if (!_ep || (!ep->desc && ep != &ep->pcd->ep0)
+ || ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ DWC_WARN("%s, bad ep\n", __func__);
+ return -EINVAL;
+ }
+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
+ if (ep->dwc_ep.is_in && !list_empty(&ep->queue)) {
+ DWC_WARN("%s() %s XFer In process\n", __func__, _ep->name);
+ retval = -EAGAIN;
+ } else if (_value == 0) {
+ dwc_otg_ep_clear_stall(ep->pcd->otg_dev->core_if,&ep->dwc_ep);
+ } else if (_value == 1) {
+ if (ep->dwc_ep.num == 0) {
+ ep->pcd->ep0state = EP0_STALL;
+ }
+ ep->stopped = 1;
+ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, &ep->dwc_ep);
+ } else if (_value == 2) {
+ ep->dwc_ep.stall_clear_flag = 0;
+ } else if (_value == 3) {
+ ep->dwc_ep.stall_clear_flag = 1;
+ }
+ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags);
+ return retval;
+}
+
+static struct usb_ep_ops dwc_otg_pcd_ep_ops =
+{
+ .enable = dwc_otg_pcd_ep_enable,
+ .disable = dwc_otg_pcd_ep_disable,
+ .alloc_request = dwc_otg_pcd_alloc_request,
+ .free_request = dwc_otg_pcd_free_request,
+ .queue = dwc_otg_pcd_ep_queue,
+ .dequeue = dwc_otg_pcd_ep_dequeue,
+ .set_halt = dwc_otg_pcd_ep_set_halt,
+ .fifo_status = 0,
+ .fifo_flush = 0,
+};
+
+/* Gadget Operations */
+/**
+ * The following gadget operations will be implemented in the DWC_otg
+ * PCD. Functions in the API that are not described below are not
+ * implemented.
+ *
+ * The Gadget API provides wrapper functions for each of the function
+ * pointers defined in usb_gadget_ops. The Gadget Driver calls the
+ * wrapper function, which then calls the underlying PCD function. The
+ * following sections are named according to the wrapper functions
+ * (except for ioctl, which doesn't have a wrapper function). Within
+ * each section, the corresponding DWC_otg PCD function name is
+ * specified.
+ *
+ */
+
+/**
+ *Gets the USB Frame number of the last SOF.
+ */
+static int dwc_otg_pcd_get_frame(struct usb_gadget *_gadget)
+{
+ dwc_otg_pcd_t * pcd;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _gadget);
+ if (_gadget == 0) {
+ return -ENODEV;
+ } else {
+ pcd = container_of(_gadget, dwc_otg_pcd_t, gadget);
+ dwc_otg_get_frame_number(GET_CORE_IF(pcd));
+ }
+ return 0;
+}
+void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t * _pcd)
+{
+ uint32_t * addr = (uint32_t *) &(GET_CORE_IF(_pcd)->core_global_regs->gotgctl);
+ gotgctl_data_t mem;
+ gotgctl_data_t val;
+ val.d32 = dwc_read_reg32(addr);
+ if (val.b.sesreq) {
+ DWC_ERROR("Session Request Already active!\n");
+ return;
+ }
+ DWC_NOTICE("Session Request Initated\n");
+ mem.d32 = dwc_read_reg32(addr);
+ mem.b.sesreq = 1;
+ dwc_write_reg32(addr, mem.d32);
+
+ /* Start the SRP timer */
+ dwc_otg_pcd_start_srp_timer(_pcd);
+ return;
+}
+void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t * _pcd, int set)
+{
+ dctl_data_t dctl = {.d32 = 0};
+ volatile uint32_t *addr = &(GET_CORE_IF(_pcd)->dev_if->dev_global_regs->dctl);
+ if (dwc_otg_is_device_mode(GET_CORE_IF(_pcd))) {
+ if (_pcd->remote_wakeup_enable) {
+ if (set) {
+ dctl.b.rmtwkupsig = 1;
+ dwc_modify_reg32(addr, 0, dctl.d32);
+ DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
+ mdelay(1);
+ dwc_modify_reg32(addr, dctl.d32, 0);
+ DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
+ } else {
+ }
+ } else {
+ DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
+ }
+ }
+ return;
+}
+
+
+/**
+ * Initiates Session Request Protocol (SRP) to wakeup the host if no
+ * session is in progress. If a session is already in progress, but
+ * the device is suspended, remote wakeup signaling is started.
+ *
+ */
+static int dwc_otg_pcd_wakeup(struct usb_gadget *_gadget)
+{
+ unsigned long flags;
+ dwc_otg_pcd_t * pcd;
+ dsts_data_t dsts;
+ gotgctl_data_t gotgctl;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _gadget);
+ if (_gadget == 0) {
+ return -ENODEV;
+ } else {
+ pcd = container_of(_gadget, dwc_otg_pcd_t, gadget);
+ }
+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
+
+ /*
+ * This function starts the Protocol if no session is in progress. If
+ * a session is already in progress, but the device is suspended,
+ * remote wakeup signaling is started.
+ */
+
+ /* Check if valid session */
+ gotgctl.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
+ if (gotgctl.b.bsesvld) {
+
+ /* Check if suspend state */
+ dsts.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts));
+ if (dsts.b.suspsts) {
+ dwc_otg_pcd_remote_wakeup(pcd, 1);
+ }
+ } else {
+ dwc_otg_pcd_initiate_srp(pcd);
+ }
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return 0;
+}
+
+static const struct usb_gadget_ops dwc_otg_pcd_ops =
+{
+ .get_frame = dwc_otg_pcd_get_frame,
+ .wakeup = dwc_otg_pcd_wakeup,
+ // current versions must always be self-powered
+};
+
+/**
+ * This function updates the otg values in the gadget structure.
+ */
+void dwc_otg_pcd_update_otg(dwc_otg_pcd_t * _pcd, const unsigned _reset)
+{
+ if (!_pcd->gadget.is_otg)
+ return;
+ if (_reset) {
+ _pcd->b_hnp_enable = 0;
+ _pcd->a_hnp_support = 0;
+ _pcd->a_alt_hnp_support = 0;
+ }
+ _pcd->gadget.b_hnp_enable = _pcd->b_hnp_enable;
+ _pcd->gadget.a_hnp_support = _pcd->a_hnp_support;
+ _pcd->gadget.a_alt_hnp_support = _pcd->a_alt_hnp_support;
+}
+
+/**
+ * This function is the top level PCD interrupt handler.
+ */
+static irqreturn_t dwc_otg_pcd_irq(int _irq, void *_dev)
+{
+ dwc_otg_pcd_t * pcd = _dev;
+ int32_t retval = IRQ_NONE;
+ retval = dwc_otg_pcd_handle_intr(pcd);
+ return IRQ_RETVAL(retval);
+}
+
+/**
+ * PCD Callback function for initializing the PCD when switching to
+ * device mode.
+ *
+ * @param _p void pointer to the <code>dwc_otg_pcd_t</code>
+ */
+static int32_t dwc_otg_pcd_start_cb(void *_p)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) _p;
+
+ /*
+ * Initialized the Core for Device mode.
+ */
+ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) {
+ dwc_otg_core_dev_init(GET_CORE_IF(pcd));
+ }
+ return 1;
+}
+
+/**
+ * PCD Callback function for stopping the PCD when switching to Host
+ * mode.
+ *
+ * @param _p void pointer to the <code>dwc_otg_pcd_t</code>
+ */
+static int32_t dwc_otg_pcd_stop_cb(void *_p)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) _p;
+ extern void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd);
+ dwc_otg_pcd_stop(pcd);
+ return 1;
+}
+
+/**
+ * PCD Callback function for notifying the PCD when resuming from
+ * suspend.
+ *
+ * @param _p void pointer to the <code>dwc_otg_pcd_t</code>
+ */
+static int32_t dwc_otg_pcd_suspend_cb(void *_p)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) _p;
+ if (pcd->driver && pcd->driver->resume) {
+ SPIN_UNLOCK(&pcd->lock);
+ pcd->driver->suspend(&pcd->gadget);
+ SPIN_LOCK(&pcd->lock);
+ }
+ return 1;
+}
+
+
+/**
+ * PCD Callback function for notifying the PCD when resuming from
+ * suspend.
+ *
+ * @param _p void pointer to the <code>dwc_otg_pcd_t</code>
+ */
+static int32_t dwc_otg_pcd_resume_cb(void *_p)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) _p;
+ if (pcd->driver && pcd->driver->resume) {
+ SPIN_UNLOCK(&pcd->lock);
+ pcd->driver->resume(&pcd->gadget);
+ SPIN_LOCK(&pcd->lock);
+ }
+
+ /* Stop the SRP timeout timer. */
+ if ((GET_CORE_IF(pcd)->core_params->phy_type !=
+ DWC_PHY_TYPE_PARAM_FS) || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
+ if (GET_CORE_IF(pcd)->srp_timer_started) {
+ GET_CORE_IF(pcd)->srp_timer_started = 0;
+ del_timer(&pcd->srp_timer);
+ }
+ }
+ return 1;
+}
+
+/**
+ * PCD Callback structure for handling mode switching.
+ */
+static dwc_otg_cil_callbacks_t pcd_callbacks =
+{
+ .start = dwc_otg_pcd_start_cb,
+ .stop = dwc_otg_pcd_stop_cb,
+ .suspend = dwc_otg_pcd_suspend_cb,
+ .resume_wakeup = dwc_otg_pcd_resume_cb,
+ .p = 0, /* Set at registration */
+};
+
+/**
+ * This function is called when the SRP timer expires. The SRP should
+ * complete within 6 seconds.
+ */
+static void srp_timeout(unsigned long _ptr)
+{
+ gotgctl_data_t gotgctl;
+ dwc_otg_core_if_t * core_if = (dwc_otg_core_if_t *) _ptr;
+ volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
+ gotgctl.d32 = dwc_read_reg32(addr);
+ core_if->srp_timer_started = 0;
+ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
+ (core_if->core_params->i2c_enable)) {
+ DWC_PRINT("SRP Timeout\n");
+ if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
+ core_if->pcd_cb->resume_wakeup(core_if->
+ pcd_cb->p);
+ }
+
+ /* Clear Session Request */
+ gotgctl.d32 = 0;
+ gotgctl.b.sesreq = 1;
+ dwc_modify_reg32(&core_if->core_global_regs->gotgctl,gotgctl.d32, 0);
+ core_if->srp_success = 0;
+ } else {
+ DWC_ERROR("Device not connected/responding\n");
+ gotgctl.b.sesreq = 0;
+ dwc_write_reg32(addr, gotgctl.d32);
+ }
+ } else if (gotgctl.b.sesreq) {
+ DWC_PRINT("SRP Timeout\n");
+ DWC_ERROR("Device not connected/responding\n");
+ gotgctl.b.sesreq = 0;
+ dwc_write_reg32(addr, gotgctl.d32);
+ } else {
+ DWC_PRINT(" SRP GOTGCTL=%0x\n", gotgctl.d32);
+ }
+}
+
+/**
+ * Start the SRP timer to detect when the SRP does not complete within
+ * 6 seconds.
+ *
+ * @param _pcd the pcd structure.
+ */
+void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t * _pcd)
+{
+ struct timer_list *srp_timer = &_pcd->srp_timer;
+ GET_CORE_IF(_pcd)->srp_timer_started = 1;
+ init_timer(srp_timer);
+ srp_timer->function = srp_timeout;
+ srp_timer->data = (unsigned long)GET_CORE_IF(_pcd);
+ srp_timer->expires = jiffies + (HZ * 6);
+ add_timer(srp_timer);
+}
+
+/**
+ * Tasklet
+ *
+ */
+extern void start_next_request(dwc_otg_pcd_ep_t * _ep);
+
+static void start_xfer_tasklet_func(unsigned long data)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) data;
+ dwc_otg_core_if_t * core_if = pcd->otg_dev->core_if;
+ int i;
+ depctl_data_t diepctl;
+ DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
+ diepctl.d32 =
+ dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl);
+ if (pcd->ep0.queue_sof) {
+ pcd->ep0.queue_sof = 0;
+ start_next_request(&pcd->ep0);
+ // break;
+ }
+ for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
+ depctl_data_t diepctl;
+ diepctl.d32 =
+ dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl);
+ if (pcd->in_ep[i].queue_sof) {
+ pcd->in_ep[i].queue_sof = 0;
+ start_next_request(&pcd->in_ep[i]);
+ // break;
+ }
+ }
+ return;
+}
+
+static struct tasklet_struct start_xfer_tasklet =
+{
+ .next = NULL,
+ .state = 0,
+ .count = ATOMIC_INIT(0),
+ .func = start_xfer_tasklet_func,
+ .data = 0,
+};
+
+/**
+ * This function initialized the pcd Dp structures to there default
+ * state.
+ *
+ * @param _pcd the pcd structure.
+ */
+void dwc_otg_pcd_reinit(dwc_otg_pcd_t * _pcd)
+{
+ static const char *names[] =
+ {
+ "ep0", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
+ "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in", "ep12in", "ep13in",
+ "ep14in", "ep15in", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
+ "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out", "ep12out",
+ "ep13out", "ep14out", "ep15out"
+ };
+
+ int i;
+ int in_ep_cntr, out_ep_cntr;
+ uint32_t hwcfg1;
+ uint32_t num_in_eps = (GET_CORE_IF(_pcd))->dev_if->num_in_eps;
+ uint32_t num_out_eps = (GET_CORE_IF(_pcd))->dev_if->num_out_eps;
+ dwc_otg_pcd_ep_t * ep;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd);
+ INIT_LIST_HEAD(&_pcd->gadget.ep_list);
+ _pcd->gadget.ep0 = &_pcd->ep0.ep;
+ _pcd->gadget.speed = USB_SPEED_UNKNOWN;
+ INIT_LIST_HEAD(&_pcd->gadget.ep0->ep_list);
+
+ /**
+ * Initialize the EP0 structure.
+ */
+ ep = &_pcd->ep0;
+
+ /* Init EP structure */
+ ep->desc = 0;
+ ep->pcd = _pcd;
+ ep->stopped = 1;
+
+ /* Init DWC ep structure */
+ ep->dwc_ep.num = 0;
+ ep->dwc_ep.active = 0;
+ ep->dwc_ep.tx_fifo_num = 0;
+
+ /* Control until ep is actvated */
+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
+ ep->dwc_ep.dma_addr = 0;
+ ep->dwc_ep.start_xfer_buff = 0;
+ ep->dwc_ep.xfer_buff = 0;
+ ep->dwc_ep.xfer_len = 0;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = 0;
+ ep->queue_sof = 0;
+
+ /* Init the usb_ep structure. */
+ ep->ep.name = names[0];
+ ep->ep.ops = &dwc_otg_pcd_ep_ops;
+
+ /**
+ * @todo NGS: What should the max packet size be set to
+ * here? Before EP type is set?
+ */
+ ep->ep.maxpacket = MAX_PACKET_SIZE;
+ list_add_tail(&ep->ep.ep_list, &_pcd->gadget.ep_list);
+ INIT_LIST_HEAD(&ep->queue);
+
+ /**
+ * Initialize the EP structures.
+ */
+ in_ep_cntr = 0;
+ hwcfg1 = (GET_CORE_IF(_pcd))->hwcfg1.d32 >> 3;
+ for (i = 1; in_ep_cntr < num_in_eps; i++) {
+ if ((hwcfg1 & 0x1) == 0) {
+ dwc_otg_pcd_ep_t * ep = &_pcd->in_ep[in_ep_cntr];
+ in_ep_cntr++;
+
+ /* Init EP structure */
+ ep->desc = 0;
+ ep->pcd = _pcd;
+ ep->stopped = 1;
+
+ /* Init DWC ep structure */
+ ep->dwc_ep.is_in = 1;
+ ep->dwc_ep.num = i;
+ ep->dwc_ep.active = 0;
+ ep->dwc_ep.tx_fifo_num = 0;
+
+ /* Control until ep is actvated */
+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
+ ep->dwc_ep.dma_addr = 0;
+ ep->dwc_ep.start_xfer_buff = 0;
+ ep->dwc_ep.xfer_buff = 0;
+ ep->dwc_ep.xfer_len = 0;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = 0;
+ ep->queue_sof = 0;
+
+ /* Init the usb_ep structure. */
+ /**
+ * @todo NGS: Add direction to EP, based on contents
+ * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
+ * sprintf( ";r
+ */
+ ep->ep.name = names[i];
+ ep->ep.ops = &dwc_otg_pcd_ep_ops;
+
+ /**
+ * @todo NGS: What should the max packet size be set to
+ * here? Before EP type is set?
+ */
+ ep->ep.maxpacket = MAX_PACKET_SIZE;
+ list_add_tail(&ep->ep.ep_list, &_pcd->gadget.ep_list);
+ INIT_LIST_HEAD(&ep->queue);
+ }
+ hwcfg1 >>= 2;
+ }
+ out_ep_cntr = 0;
+ hwcfg1 = (GET_CORE_IF(_pcd))->hwcfg1.d32 >> 2;
+ for (i = 1; out_ep_cntr < num_out_eps; i++) {
+ if ((hwcfg1 & 0x1) == 0) {
+ dwc_otg_pcd_ep_t * ep = &_pcd->out_ep[out_ep_cntr];
+ out_ep_cntr++;
+
+ /* Init EP structure */
+ ep->desc = 0;
+ ep->pcd = _pcd;
+ ep->stopped = 1;
+
+ /* Init DWC ep structure */
+ ep->dwc_ep.is_in = 0;
+ ep->dwc_ep.num = i;
+ ep->dwc_ep.active = 0;
+ ep->dwc_ep.tx_fifo_num = 0;
+
+ /* Control until ep is activated */
+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
+ ep->dwc_ep.dma_addr = 0;
+ ep->dwc_ep.start_xfer_buff = 0;
+ ep->dwc_ep.xfer_buff = 0;
+ ep->dwc_ep.xfer_len = 0;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = 0;
+ ep->queue_sof = 0;
+
+ /* Init the usb_ep structure. */
+ /**
+ * @todo NGS: Add direction to EP, based on contents
+ * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
+ * sprintf( ";r
+ */
+ ep->ep.name = names[15 + i];
+ ep->ep.ops = &dwc_otg_pcd_ep_ops;
+
+ /**
+ * @todo NGS: What should the max packet size be set to
+ * here? Before EP type is set?
+ */
+ ep->ep.maxpacket = MAX_PACKET_SIZE;
+ list_add_tail(&ep->ep.ep_list, &_pcd->gadget.ep_list);
+ INIT_LIST_HEAD(&ep->queue);
+ }
+ hwcfg1 >>= 2;
+ }
+
+ /* remove ep0 from the list. There is a ep0 pointer. */
+ list_del_init(&_pcd->ep0.ep.ep_list);
+ _pcd->ep0state = EP0_DISCONNECT;
+ _pcd->ep0.ep.maxpacket = MAX_EP0_SIZE;
+ _pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
+ _pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
+}
+
+/**
+ * This function releases the Gadget device.
+ * required by device_unregister().
+ *
+ * @todo Should this do something? Should it free the PCD?
+ */
+static void dwc_otg_pcd_gadget_release(struct device *_dev)
+{
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _dev);
+}
+
+/**
+ * This function initializes the PCD portion of the driver.
+ *
+ */
+int __init dwc_otg_pcd_init(struct device *_dev)
+{
+ static char pcd_name[] = "dwc_otg_pcd";
+ dwc_otg_pcd_t * pcd;
+ dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev);
+ int retval = 0;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _dev);
+
+ /*
+ * Allocate PCD structure
+ */
+ pcd = kmalloc(sizeof(dwc_otg_pcd_t), GFP_KERNEL);
+ if (pcd == 0) {
+ return -ENOMEM;
+ }
+ memset(pcd, 0, sizeof(dwc_otg_pcd_t));
+ spin_lock_init(&pcd->lock);
+ otg_dev->pcd = pcd;
+ s_pcd = pcd;
+ pcd->gadget.name = pcd_name;
+ dev_set_name(&pcd->gadget.dev, "gadget");
+ pcd->otg_dev = dev_get_drvdata(_dev);
+ pcd->gadget.dev.parent = _dev;
+ pcd->gadget.dev.release = dwc_otg_pcd_gadget_release;
+ pcd->gadget.ops = &dwc_otg_pcd_ops;
+ if (GET_CORE_IF(pcd)->hwcfg4.b.ded_fifo_en) {
+ DWC_PRINT("Dedicated Tx FIFOs mode\n");
+ } else {
+ DWC_PRINT("Shared Tx FIFO mode\n");
+ }
+
+ /* If the module is set to FS or if the PHY_TYPE is FS then the gadget
+ * should not report as dual-speed capable. replace the following line
+ * with the block of code below it once the software is debugged for
+ * this. If is_dualspeed = 0 then the gadget driver should not report
+ * a device qualifier descriptor when queried. */
+ if ((GET_CORE_IF(pcd)->core_params->speed == DWC_SPEED_PARAM_FULL)
+ || ((GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == 2)
+ && (GET_CORE_IF(pcd)->hwcfg2.b.fs_phy_type == 1)
+ && (GET_CORE_IF(pcd)->core_params->ulpi_fs_ls))) {
+ pcd->gadget.is_dualspeed = 0;
+ } else {
+ pcd->gadget.is_dualspeed = 1;
+ }
+ if ((otg_dev->core_if->hwcfg2.b.op_mode ==
+ DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE)
+ || (otg_dev->core_if->hwcfg2.b.op_mode ==
+ DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST)
+ || (otg_dev->core_if->hwcfg2.b.op_mode ==
+ DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE)
+ || (otg_dev->core_if->hwcfg2.b.op_mode ==
+ DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) {
+ pcd->gadget.is_otg = 0;
+ } else {
+ pcd->gadget.is_otg = 1;
+ }
+ pcd->driver = 0;
+
+ /* Register the gadget device */
+ retval = device_register(&pcd->gadget.dev);
+
+ /*
+ * Initialized the Core for Device mode.
+ */
+ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) {
+ dwc_otg_core_dev_init(GET_CORE_IF(pcd));
+ }
+
+ /*
+ * Initialize EP structures
+ */
+ dwc_otg_pcd_reinit(pcd);
+
+ /*
+ * Register the PCD Callbacks.
+ */
+ dwc_otg_cil_register_pcd_callbacks(otg_dev->core_if, &pcd_callbacks,pcd);
+
+ /*
+ * Setup interrupt handler
+ */
+ DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n",otg_dev->irq);
+ retval = request_irq(otg_dev->irq, dwc_otg_pcd_irq, IRQF_SHARED,
+ pcd->gadget.name, pcd);
+ if (retval != 0) {
+ DWC_ERROR("request of irq%d failed\n", otg_dev->irq);
+ kfree(pcd);
+ return -EBUSY;
+ }
+
+ /*
+ * Initialize the DMA buffer for SETUP packets
+ */
+ if (GET_CORE_IF(pcd)->dma_enable) {
+ pcd->setup_pkt = dma_alloc_coherent(_dev, sizeof(*pcd->setup_pkt) * 5,
+ &pcd->setup_pkt_dma_handle, 0);
+ pcd->status_buf = dma_alloc_coherent(_dev, sizeof(uint16_t),
+ &pcd->status_buf_dma_handle, 0);
+ } else {
+ pcd->setup_pkt = kmalloc(sizeof(*pcd->setup_pkt) * 5, GFP_KERNEL);
+ pcd->status_buf = kmalloc(sizeof(uint16_t), GFP_KERNEL);
+ }
+ if (pcd->setup_pkt == 0) {
+ kfree(pcd);
+ return -ENOMEM;
+ }
+
+ /* Initialize tasklet */
+ start_xfer_tasklet.data = (unsigned long)pcd;
+ pcd->start_xfer_tasklet = &start_xfer_tasklet;
+ return 0;
+}
+
+/**
+ * Cleanup the PCD.
+ */
+void dwc_otg_pcd_remove(struct device *_dev)
+{
+ dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_pcd_t * pcd = otg_dev->pcd;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _dev);
+
+ /*
+ * Free the IRQ
+ */
+ free_irq(otg_dev->irq, pcd);
+
+ /* start with the driver above us */
+ if (pcd->driver) {
+
+ /* should have been done already by driver model core */
+ DWC_WARN("driver '%s' is still registered\n",pcd->driver->driver.name);
+ usb_gadget_unregister_driver(pcd->driver);
+ }
+ device_unregister(&pcd->gadget.dev);
+ if (GET_CORE_IF(pcd)->dma_enable) {
+ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt) * 5,
+ pcd->setup_pkt, pcd->setup_pkt_dma_handle);
+ dma_free_coherent(NULL, sizeof(uint16_t), pcd->status_buf,
+ pcd->status_buf_dma_handle);
+ } else {
+ kfree(pcd->setup_pkt);
+ kfree(pcd->status_buf);
+ }
+ kfree(pcd);
+ otg_dev->pcd = 0;
+}
+
+
+/**
+ * This function registers a gadget driver with the PCD.
+ *
+ * When a driver is successfully registered, it will receive control
+ * requests including set_configuration(), which enables non-control
+ * requests. then usb traffic follows until a disconnect is reported.
+ * then a host may connect again, or the driver might get unbound.
+ *
+ * @param _driver The driver being registered
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *_driver)
+{
+ int retval;
+ DWC_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n",
+ _driver->driver.name);
+ if (!_driver || _driver->speed == USB_SPEED_UNKNOWN || !_driver->bind
+ || !_driver->disconnect || !_driver->setup) {
+ DWC_DEBUGPL(DBG_PCDV, "EINVAL\n");
+#if 1
+ printk("_driver=0x%p speed=0x%x bind=0x%p unbind=0x%p disconnect=0x%p setup=0x%p\n", _driver, _driver->speed, _driver->bind, _driver->unbind, _driver->disconnect, _driver->setup);
+#endif
+ return -EINVAL;
+ }
+ if (s_pcd == 0) {
+ DWC_DEBUGPL(DBG_PCDV, "ENODEV\n");
+ return -ENODEV;
+ }
+ if (s_pcd->driver != 0) {
+ DWC_DEBUGPL(DBG_PCDV, "EBUSY (%p)\n", s_pcd->driver);
+ return -EBUSY;
+ }
+
+ /* hook up the driver */
+ s_pcd->driver = _driver;
+ s_pcd->gadget.dev.driver = &_driver->driver;
+ {
+ dwc_otg_core_if_t *_core_if = s_pcd->otg_dev->core_if;
+ if(_core_if) {
+ dwc_otg_disable_global_interrupts(_core_if);
+ dwc_otg_core_init(_core_if);
+ dwc_otg_pcd_reinit(s_pcd);
+ dwc_otg_enable_global_interrupts(_core_if);
+ if (_core_if->pcd_cb)
+ dwc_otg_pcd_start_cb(_core_if->pcd_cb->p);
+ }
+
+ }
+ DWC_DEBUGPL(DBG_PCD, "bind to driver %s\n", _driver->driver.name);
+ retval = _driver->bind(&s_pcd->gadget);
+ if (retval) {
+ DWC_ERROR("bind to driver %s --> error %d\n",
+ _driver->driver.name, retval);
+ s_pcd->driver = 0;
+ s_pcd->gadget.dev.driver = 0;
+ return retval;
+ }
+ DWC_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n",
+ _driver->driver.name);
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+/**
+ * This function unregisters a gadget driver
+ *
+ * @param _driver The driver being unregistered
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *_driver)
+{
+
+ //DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, _driver);
+ if (s_pcd == 0) {
+ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): s_pcd==0\n", __func__,-ENODEV);
+ return -ENODEV;
+ }
+ if (_driver == 0 || _driver != s_pcd->driver) {
+ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): driver?\n", __func__,-EINVAL);
+ return -EINVAL;
+ }
+ _driver->unbind(&s_pcd->gadget);
+ s_pcd->driver = 0;
+ DWC_DEBUGPL(DBG_ANY, "unregistered driver '%s'\n",
+ _driver->driver.name);
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+#endif /* DWC_HOST_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h b/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h
index c2fe8caecf6..540451ec06d 100644
--- a/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h
+++ b/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h
@@ -242,6 +242,8 @@ static inline uint32_t SET_DEBUG_LEVEL( const uint32_t _new )
* mode. */
#define DBG_HCD_URB (0x800)
+#define DBG_SP (0x400)
+
/** When debug level has any bit set, display debug messages */
#define DBG_ANY (0xFF)
@@ -268,7 +270,7 @@ static inline uint32_t SET_DEBUG_LEVEL( const uint32_t _new )
* usb-DWC_otg: dwc_otg_cil_init(ca867000)
* </code>
*/
-#ifdef DEBUG
+#ifdef CONFIG_DWC_DEBUG
# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_ERR USB_DWC x ); }while(0)
# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x )
diff --git a/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h.org b/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h.org
new file mode 100644
index 00000000000..c2fe8caecf6
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h.org
@@ -0,0 +1,304 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/platform/dwc_otg_plat.h $
+ * $Revision: #1 $
+ * $Date: 2005/07/07 $
+ * $Change: 510301 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_OTG_PLAT_H__)
+#define __DWC_OTG_PLAT_H__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+/**
+ * @file
+ *
+ * This file contains the Platform Specific constants, interfaces
+ * (functions and macros) for Linux.
+ *
+ */
+#if !defined(CONFIG_4xx)
+#error "The contents of this file are AMCC 44x processor specific!!!"
+#endif
+
+#if defined(CONFIG_405EX) || defined(CONFIG_460EX) || \
+ defined(CONFIG_APM82181)
+#define CONFIG_DWC_OTG_REG_LE
+#endif
+
+#if defined(CONFIG_405EZ)
+#define CONFIG_DWC_OTG_FIFO_LE
+#endif
+
+#define SZ_256K 0x00040000
+/**
+ * Reads the content of a register.
+ *
+ * @param _reg address of register to read.
+ * @return contents of the register.
+ *
+
+ * Usage:<br>
+ * <code>uint32_t dev_ctl = dwc_read_reg32(&dev_regs->dctl);</code>
+ */
+static __inline__ uint32_t dwc_read_reg32( volatile uint32_t *_reg)
+{
+#ifdef CONFIG_DWC_OTG_REG_LE
+ return in_le32(_reg);
+#else
+ return in_be32(_reg);
+#endif
+};
+
+/**
+ * Writes a register with a 32 bit value.
+ *
+ * @param _reg address of register to read.
+ * @param _value to write to _reg.
+ *
+ * Usage:<br>
+ * <code>dwc_write_reg32(&dev_regs->dctl, 0); </code>
+ */
+static __inline__ void dwc_write_reg32( volatile uint32_t *_reg, const uint32_t _value)
+{
+#ifdef CONFIG_DWC_OTG_REG_LE
+ out_le32(_reg, _value);
+#else
+ out_be32(_reg, _value);
+#endif
+};
+
+/**
+ * This function modifies bit values in a register. Using the
+ * algorithm: (reg_contents & ~clear_mask) | set_mask.
+ *
+ * @param _reg address of register to read.
+ * @param _clear_mask bit mask to be cleared.
+ * @param _set_mask bit mask to be set.
+ *
+ * Usage:<br>
+ * <code> // Clear the SOF Interrupt Mask bit and <br>
+ * // set the OTG Interrupt mask bit, leaving all others as they were.
+ * dwc_modify_reg32(&dev_regs->gintmsk, DWC_SOF_INT, DWC_OTG_INT);</code>
+ */
+static __inline__
+void dwc_modify_reg32( volatile uint32_t *_reg, const uint32_t _clear_mask, const uint32_t _set_mask)
+{
+#ifdef CONFIG_DWC_OTG_REG_LE
+ out_le32( _reg, (in_le32(_reg) & ~_clear_mask) | _set_mask );
+#else
+ out_be32( _reg, (in_be32(_reg) & ~_clear_mask) | _set_mask );
+#endif
+};
+
+static __inline__ void dwc_write_datafifo32( volatile uint32_t *_reg, const uint32_t _value)
+{
+#ifdef CONFIG_DWC_OTG_FIFO_LE
+ out_le32(_reg, _value);
+#else
+ out_be32(_reg, _value);
+#endif
+};
+
+static __inline__ uint32_t dwc_read_datafifo32( volatile uint32_t *_reg)
+{
+#ifdef CONFIG_DWC_OTG_FIFO_LE
+ return in_le32(_reg);
+#else
+ return in_be32(_reg);
+#endif
+};
+
+
+/**
+ * Wrapper for the OS micro-second delay function.
+ * @param[in] _usecs Microseconds of delay
+ */
+static __inline__ void UDELAY( const uint32_t _usecs )
+{
+ udelay( _usecs );
+}
+
+/**
+ * Wrapper for the OS milli-second delay function.
+ * @param[in] _msecs milliseconds of delay
+ */
+static __inline__ void MDELAY( const uint32_t _msecs )
+{
+ mdelay( _msecs );
+}
+
+/**
+ * Wrapper for the Linux spin_lock. On the ARM (Integrator)
+ * spin_lock() is a nop.
+ *
+ * @param _lock Pointer to the spinlock.
+ */
+static __inline__ void SPIN_LOCK( spinlock_t *_lock )
+{
+ spin_lock(_lock);
+}
+
+/**
+ * Wrapper for the Linux spin_unlock. On the ARM (Integrator)
+ * spin_lock() is a nop.
+ *
+ * @param _lock Pointer to the spinlock.
+ */
+static __inline__ void SPIN_UNLOCK( spinlock_t *_lock )
+{
+ spin_unlock(_lock);
+}
+
+/**
+ * Wrapper (macro) for the Linux spin_lock_irqsave. On the ARM
+ * (Integrator) spin_lock() is a nop.
+ *
+ * @param _l Pointer to the spinlock.
+ * @param _f unsigned long for irq flags storage.
+ */
+#define SPIN_LOCK_IRQSAVE( _l, _f ) { \
+ spin_lock_irqsave(_l,_f); \
+ }
+
+/**
+ * Wrapper (macro) for the Linux spin_unlock_irqrestore. On the ARM
+ * (Integrator) spin_lock() is a nop.
+ *
+ * @param _l Pointer to the spinlock.
+ * @param _f unsigned long for irq flags storage.
+ */
+#define SPIN_UNLOCK_IRQRESTORE( _l,_f ) {\
+ spin_unlock_irqrestore(_l,_f); \
+ }
+
+
+/*
+ * Debugging support vanishes in non-debug builds.
+ */
+
+
+/**
+ * The Debug Level bit-mask variable.
+ */
+extern uint32_t g_dbg_lvl;
+/**
+ * Set the Debug Level variable.
+ */
+static inline uint32_t SET_DEBUG_LEVEL( const uint32_t _new )
+{
+ uint32_t old = g_dbg_lvl;
+ g_dbg_lvl = _new;
+ return old;
+}
+
+/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */
+#define DBG_CIL (0x2)
+/** When debug level has the DBG_CILV bit set, display CIL Verbose debug
+ * messages */
+#define DBG_CILV (0x20)
+/** When debug level has the DBG_PCD bit set, display PCD (Device) debug
+ * messages */
+#define DBG_PCD (0x4)
+/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug
+ * messages */
+#define DBG_PCDV (0x40)
+/** When debug level has the DBG_HCD bit set, display Host debug messages */
+#define DBG_HCD (0x8)
+/** When debug level has the DBG_HCDV bit set, display Verbose Host debug
+ * messages */
+#define DBG_HCDV (0x80)
+/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host
+ * mode. */
+#define DBG_HCD_URB (0x800)
+
+/** When debug level has any bit set, display debug messages */
+#define DBG_ANY (0xFF)
+
+/** All debug messages off */
+#define DBG_OFF 0
+
+/** Prefix string for DWC_DEBUG print macros. */
+#define USB_DWC "dwc_otg: "
+
+/**
+ * Print a debug message when the Global debug level variable contains
+ * the bit defined in <code>lvl</code>.
+ *
+ * @param[in] lvl - Debug level, use one of the DBG_ constants above.
+ * @param[in] x - like printf
+ *
+ * Example:<p>
+ * <code>
+ * DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr);
+ * </code>
+ * <br>
+ * results in:<br>
+ * <code>
+ * usb-DWC_otg: dwc_otg_cil_init(ca867000)
+ * </code>
+ */
+#ifdef DEBUG
+# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_ERR USB_DWC x ); }while(0)
+# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x )
+
+# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl)
+
+#else
+
+# define DWC_DEBUGPL(lvl, x...) do{}while(0)
+# define DWC_DEBUGP(x...)
+
+# define CHK_DEBUG_LEVEL(level) (0)
+
+#endif /*DEBUG*/
+
+/**
+ * Print an Error message.
+ */
+#define DWC_ERROR(x...) printk( KERN_ERR USB_DWC x )
+/**
+ * Print a Warning message.
+ */
+#define DWC_WARN(x...) printk( KERN_WARNING USB_DWC x )
+/**
+ * Print a notice (normal but significant message).
+ */
+#define DWC_NOTICE(x...) printk( KERN_NOTICE USB_DWC x )
+/**
+ * Basic message printing.
+ */
+#define DWC_PRINT(x...) printk( KERN_INFO USB_DWC x )
+
+#endif
+
diff --git a/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h b/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h
index c4a4f2806f6..13043152971 100644
--- a/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h
+++ b/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h
@@ -32,7 +32,7 @@
#define DCRN_DMASR_BASE 0x120
#endif
-#ifdef CONFIG_460EX || defined(CONFIG_APM82181)
+#if defined(CONFIG_460EX) || defined(CONFIG_APM82181)
#define DCRN_DMA0_BASE 0x200
#define DCRN_DMASR_BASE 0x220
#endif
diff --git a/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h.org b/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h.org
new file mode 100644
index 00000000000..c4a4f2806f6
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h.org
@@ -0,0 +1,620 @@
+/*
+ * include/asm-ppc/ppc4xx_dma.h
+ *
+ * IBM PPC4xx DMA engine library
+ *
+ * Copyright 2000-2004 MontaVista Software Inc.
+ *
+ * Cleaned up a bit more, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Original code by Armin Kuster <akuster@mvista.com>
+ * and Pete Popov <ppopov@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASMPPC_PPC4xx_DMA_H
+#define __ASMPPC_PPC4xx_DMA_H
+
+#include <linux/types.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_405EX
+#define DCRN_DMA0_BASE 0x100
+#define DCRN_DMASR_BASE 0x120
+#endif
+
+#ifdef CONFIG_460EX || defined(CONFIG_APM82181)
+#define DCRN_DMA0_BASE 0x200
+#define DCRN_DMASR_BASE 0x220
+#endif
+
+#ifndef DCRN_DMA0_BASE
+#error DMA register not defined for this PPC4xx variant!
+#endif
+
+#define DCRN_DMACR0 (DCRN_DMA0_BASE + 0x0) /* DMA Channel Control 0 */
+#define DCRN_DMACT0 (DCRN_DMA0_BASE + 0x1) /* DMA Count 0 */
+#define DCRN_DMASAH0 (DCRN_DMA0_BASE + 0x2) /* DMA Src Addr High 0 */
+#define DCRN_DMASA0 (DCRN_DMA0_BASE + 0x3) /* DMA Src Addr Low 0 */
+#define DCRN_DMADAH0 (DCRN_DMA0_BASE + 0x4) /* DMA Dest Addr High 0 */
+#define DCRN_DMADA0 (DCRN_DMA0_BASE + 0x5) /* DMA Dest Addr Low 0 */
+#define DCRN_ASGH0 (DCRN_DMA0_BASE + 0x6) /* DMA SG Desc Addr High 0 */
+#define DCRN_ASG0 (DCRN_DMA0_BASE + 0x7) /* DMA SG Desc Addr Low 0 */
+
+#define DCRN_DMASR (DCRN_DMASR_BASE + 0x0) /* DMA Status Register */
+#define DCRN_ASGC (DCRN_DMASR_BASE + 0x3) /* DMA Scatter/Gather Command */
+#define DCRN_SLP (DCRN_DMASR_BASE + 0x5) /* DMA Sleep Register */
+#define DCRN_POL (DCRN_DMASR_BASE + 0x6) /* DMA Polarity Register */
+
+#undef DEBUG_4xxDMA
+
+#define MAX_PPC4xx_DMA_CHANNELS 4
+
+#define DMA_CH0 0
+#define DMA_CH1 1
+#define DMA_CH2 2
+#define DMA_CH3 3
+
+/*
+ * Function return status codes
+ * These values are used to indicate whether or not the function
+ * call was successful, or a bad/invalid parameter was passed.
+ */
+#define DMA_STATUS_GOOD 0
+#define DMA_STATUS_BAD_CHANNEL 1
+#define DMA_STATUS_BAD_HANDLE 2
+#define DMA_STATUS_BAD_MODE 3
+#define DMA_STATUS_NULL_POINTER 4
+#define DMA_STATUS_OUT_OF_MEMORY 5
+#define DMA_STATUS_SGL_LIST_EMPTY 6
+#define DMA_STATUS_GENERAL_ERROR 7
+#define DMA_STATUS_CHANNEL_NOTFREE 8
+
+#define DMA_CHANNEL_BUSY 0x80000000
+
+/*
+ * These indicate status as returned from the DMA Status Register.
+ */
+#define DMA_STATUS_NO_ERROR 0
+#define DMA_STATUS_CS 1 /* Count Status */
+#define DMA_STATUS_TS 2 /* Transfer Status */
+#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */
+#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */
+
+
+/*
+ * DMA Channel Control Registers
+ */
+
+/* The 44x devices have 64bit DMA controllers, where the 405EX/r have 32bit */
+#if defined(CONFIG_44x)
+#define PPC4xx_DMA_64BIT
+#endif
+
+/* The 44x and 405EX/r come up big-endian with last bit reserved */
+#if defined(CONFIG_44x) || defined(CONFIG_405EX) || defined(CONFIG_405EXr)
+#define DMA_CR_OFFSET 1
+#else
+#define DMA_CR_OFFSET 0
+#endif
+
+#define DMA_CE_ENABLE (1<<31) /* DMA Channel Enable */
+#define SET_DMA_CE_ENABLE(x) (((x)&0x1)<<31)
+#define GET_DMA_CE_ENABLE(x) (((x)&DMA_CE_ENABLE)>>31)
+
+#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */
+#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
+#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
+
+#define DMA_TD (1<<29)
+#define SET_DMA_TD(x) (((x)&0x1)<<29)
+#define GET_DMA_TD(x) (((x)&DMA_TD)>>29)
+
+#define DMA_PL (1<<28) /* Peripheral Location */
+#define SET_DMA_PL(x) (((x)&0x1)<<28)
+#define GET_DMA_PL(x) (((x)&DMA_PL)>>28)
+
+#define EXTERNAL_PERIPHERAL 0
+#define INTERNAL_PERIPHERAL 1
+
+#define SET_DMA_PW(x) (((x)&0x3)<<(26-DMA_CR_OFFSET)) /* Peripheral Width */
+#define DMA_PW_MASK SET_DMA_PW(3)
+#define PW_8 0
+#define PW_16 1
+#define PW_32 2
+#define PW_64 3
+/* FIXME: Add PW_128 support for 440GP DMA block */
+#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>(26-DMA_CR_OFFSET))
+
+#define DMA_DAI (1<<(25-DMA_CR_OFFSET)) /* Destination Address Increment */
+#define SET_DMA_DAI(x) (((x)&0x1)<<(25-DMA_CR_OFFSET))
+
+#define DMA_SAI (1<<(24-DMA_CR_OFFSET)) /* Source Address Increment */
+#define SET_DMA_SAI(x) (((x)&0x1)<<(24-DMA_CR_OFFSET))
+
+#define DMA_BEN (1<<(23-DMA_CR_OFFSET)) /* Buffer Enable */
+#define SET_DMA_BEN(x) (((x)&0x1)<<(23-DMA_CR_OFFSET))
+
+#define SET_DMA_TM(x) (((x)&0x3)<<(21-DMA_CR_OFFSET)) /* Transfer Mode */
+#define DMA_TM_MASK SET_DMA_TM(3)
+#define TM_PERIPHERAL 0 /* Peripheral */
+#define TM_RESERVED 1 /* Reserved */
+#define TM_S_MM 2 /* Memory to Memory */
+#define TM_D_MM 3 /* Device Paced Memory to Memory */
+#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>(21-DMA_CR_OFFSET))
+
+#define SET_DMA_PSC(x) (((x)&0x3)<<(19-DMA_CR_OFFSET)) /* Peripheral Setup Cycles */
+#define DMA_PSC_MASK SET_DMA_PSC(3)
+#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>(19-DMA_CR_OFFSET))
+
+#define SET_DMA_PWC(x) (((x)&0x3F)<<(13-DMA_CR_OFFSET)) /* Peripheral Wait Cycles */
+#define DMA_PWC_MASK SET_DMA_PWC(0x3F)
+#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>(13-DMA_CR_OFFSET))
+
+#define SET_DMA_PHC(x) (((x)&0x7)<<(10-DMA_CR_OFFSET)) /* Peripheral Hold Cycles */
+#define DMA_PHC_MASK SET_DMA_PHC(0x7)
+#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>(10-DMA_CR_OFFSET))
+
+#define DMA_ETD_OUTPUT (1<<(9-DMA_CR_OFFSET)) /* EOT pin is a TC output */
+#define SET_DMA_ETD(x) (((x)&0x1)<<(9-DMA_CR_OFFSET))
+
+#define DMA_TCE_ENABLE (1<<(8-DMA_CR_OFFSET))
+#define SET_DMA_TCE(x) (((x)&0x1)<<(8-DMA_CR_OFFSET))
+
+#define DMA_DEC (1<<(2)) /* Address Decrement */
+#define SET_DMA_DEC(x) (((x)&0x1)<<2)
+#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2)
+
+
+/*
+ * Transfer Modes
+ * These modes are defined in a way that makes it possible to
+ * simply "or" in the value in the control register.
+ */
+
+#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */
+
+ /* Device-paced memory to memory, */
+ /* device is at source address */
+#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM))
+
+ /* Device-paced memory to memory, */
+ /* device is at destination address */
+#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM))
+
+/* 405gp/440gp */
+#define SET_DMA_PREFETCH(x) (((x)&0x3)<<(4-DMA_CR_OFFSET)) /* Memory Read Prefetch */
+#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3)
+#define PREFETCH_1 0 /* Prefetch 1 Double Word */
+#define PREFETCH_2 1
+#define PREFETCH_4 2
+#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>(4-DMA_CR_OFFSET))
+
+#define DMA_PCE (1<<(3-DMA_CR_OFFSET)) /* Parity Check Enable */
+#define SET_DMA_PCE(x) (((x)&0x1)<<(3-DMA_CR_OFFSET))
+#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>(3-DMA_CR_OFFSET))
+
+/* stb3x */
+
+#define DMA_ECE_ENABLE (1<<5)
+#define SET_DMA_ECE(x) (((x)&0x1)<<5)
+#define GET_DMA_ECE(x) (((x)&DMA_ECE_ENABLE)>>5)
+
+#define DMA_TCD_DISABLE (1<<4)
+#define SET_DMA_TCD(x) (((x)&0x1)<<4)
+#define GET_DMA_TCD(x) (((x)&DMA_TCD_DISABLE)>>4)
+
+typedef uint32_t sgl_handle_t;
+
+#ifdef CONFIG_PPC4xx_EDMA
+
+#define SGL_LIST_SIZE 4096
+#define DMA_PPC4xx_SIZE SGL_LIST_SIZE
+
+#define SET_DMA_PRIORITY(x) (((x)&0x3)<<(6-DMA_CR_OFFSET)) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
+#define PRIORITY_LOW 0
+#define PRIORITY_MID_LOW 1
+#define PRIORITY_MID_HIGH 2
+#define PRIORITY_HIGH 3
+#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>(6-DMA_CR_OFFSET))
+
+/*
+ * DMA Polarity Configuration Register
+ */
+#define DMAReq_ActiveLow(chan) (1<<(31-(chan*3)))
+#define DMAAck_ActiveLow(chan) (1<<(30-(chan*3)))
+#define EOT_ActiveLow(chan) (1<<(29-(chan*3))) /* End of Transfer */
+
+/*
+ * DMA Sleep Mode Register
+ */
+#define SLEEP_MODE_ENABLE (1<<21)
+
+/*
+ * DMA Status Register
+ */
+#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
+#define DMA_CS1 (1<<30)
+#define DMA_CS2 (1<<29)
+#define DMA_CS3 (1<<28)
+
+#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
+#define DMA_TS1 (1<<26)
+#define DMA_TS2 (1<<25)
+#define DMA_TS3 (1<<24)
+
+#define DMA_CH0_ERR (1<<23) /* DMA Channel 0 Error */
+#define DMA_CH1_ERR (1<<22)
+#define DMA_CH2_ERR (1<<21)
+#define DMA_CH3_ERR (1<<20)
+
+#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1 (1<<18)
+#define DMA_IN_DMA_REQ2 (1<<17)
+#define DMA_IN_DMA_REQ3 (1<<16)
+
+#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1 (1<<14)
+#define DMA_EXT_DMA_REQ2 (1<<13)
+#define DMA_EXT_DMA_REQ3 (1<<12)
+
+#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY (1<<10)
+#define DMA_CH2_BUSY (1<<9)
+#define DMA_CH3_BUSY (1<<8)
+
+#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
+#define DMA_SG1 (1<<6)
+#define DMA_SG2 (1<<5)
+#define DMA_SG3 (1<<4)
+
+/* DMA Channel Count Register */
+#define DMA_CTC_TCIE (1<<29) /* Terminal Count Interrupt Enable */
+#define DMA_CTC_ETIE (1<<28) /* EOT Interrupt Enable */
+#define DMA_CTC_EIE (1<<27) /* Error Interrupt Enable */
+#define DMA_CTC_BTEN (1<<23) /* Burst Enable/Disable bit */
+#define DMA_CTC_BSIZ_MSK (3<<21) /* Mask of the Burst size bits */
+#define DMA_CTC_BSIZ_2 (0)
+#define DMA_CTC_BSIZ_4 (1<<21)
+#define DMA_CTC_BSIZ_8 (2<<21)
+#define DMA_CTC_BSIZ_16 (3<<21)
+#define DMA_CTC_TC_MASK 0xFFFFF
+
+/*
+ * DMA SG Command Register
+ */
+#define SSG_ENABLE(chan) (1<<(31-chan)) /* Start Scatter Gather */
+#define SSG_MASK_ENABLE(chan) (1<<(15-chan)) /* Enable writing to SSG0 bit */
+
+/*
+ * DMA Scatter/Gather Descriptor Bit fields
+ */
+#define SG_LINK (1<<31) /* Link */
+#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */
+#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */
+#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */
+#define SG_COUNT_MASK 0xFFFF /* Count Field */
+
+#define SET_DMA_CONTROL \
+ (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
+ SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */\
+ SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
+ SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
+ SET_DMA_PL(p_init->pl) | /* peripheral location */ \
+ SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
+ SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
+ SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
+ SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
+ SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
+ SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
+ SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
+ SET_DMA_PREFETCH(p_init->pf) /* read prefetch */)
+
+#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan))
+
+#elif defined(CONFIG_STB03xxx) /* stb03xxx */
+
+#define DMA_PPC4xx_SIZE 4096
+
+/*
+ * DMA Status Register
+ */
+
+#define SET_DMA_PRIORITY(x) (((x)&0x00800001)) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK 0x00800001
+#define PRIORITY_LOW 0x00000000
+#define PRIORITY_MID_LOW 0x00000001
+#define PRIORITY_MID_HIGH 0x00800000
+#define PRIORITY_HIGH 0x00800001
+#define GET_DMA_PRIORITY(x) (((((x)&DMA_PRIORITY_MASK) &0x00800000) >> 22 ) | (((x)&DMA_PRIORITY_MASK) &0x00000001))
+
+#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
+#define DMA_CS1 (1<<30)
+#define DMA_CS2 (1<<29)
+#define DMA_CS3 (1<<28)
+
+#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
+#define DMA_TS1 (1<<26)
+#define DMA_TS2 (1<<25)
+#define DMA_TS3 (1<<24)
+
+#define DMA_CH0_ERR (1<<23) /* DMA Channel 0 Error */
+#define DMA_CH1_ERR (1<<22)
+#define DMA_CH2_ERR (1<<21)
+#define DMA_CH3_ERR (1<<20)
+
+#define DMA_CT0 (1<<19) /* Chained transfer */
+
+#define DMA_IN_DMA_REQ0 (1<<18) /* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1 (1<<17)
+#define DMA_IN_DMA_REQ2 (1<<16)
+#define DMA_IN_DMA_REQ3 (1<<15)
+
+#define DMA_EXT_DMA_REQ0 (1<<14) /* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1 (1<<13)
+#define DMA_EXT_DMA_REQ2 (1<<12)
+#define DMA_EXT_DMA_REQ3 (1<<11)
+
+#define DMA_CH0_BUSY (1<<10) /* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY (1<<9)
+#define DMA_CH2_BUSY (1<<8)
+#define DMA_CH3_BUSY (1<<7)
+
+#define DMA_CT1 (1<<6) /* Chained transfer */
+#define DMA_CT2 (1<<5)
+#define DMA_CT3 (1<<4)
+
+#define DMA_CH_ENABLE (1<<7)
+#define SET_DMA_CH(x) (((x)&0x1)<<7)
+#define GET_DMA_CH(x) (((x)&DMA_CH_ENABLE)>>7)
+
+/* STBx25xxx dma unique */
+/* enable device port on a dma channel
+ * example ext 0 on dma 1
+ */
+
+#define SSP0_RECV 15
+#define SSP0_XMIT 14
+#define EXT_DMA_0 12
+#define SC1_XMIT 11
+#define SC1_RECV 10
+#define EXT_DMA_2 9
+#define EXT_DMA_3 8
+#define SERIAL2_XMIT 7
+#define SERIAL2_RECV 6
+#define SC0_XMIT 5
+#define SC0_RECV 4
+#define SERIAL1_XMIT 3
+#define SERIAL1_RECV 2
+#define SERIAL0_XMIT 1
+#define SERIAL0_RECV 0
+
+#define DMA_CHAN_0 1
+#define DMA_CHAN_1 2
+#define DMA_CHAN_2 3
+#define DMA_CHAN_3 4
+
+/* end STBx25xx */
+
+/*
+ * Bit 30 must be one for Redwoods, otherwise transfers may receive errors.
+ */
+#define DMA_CR_MB0 0x2
+
+#define SET_DMA_CONTROL \
+ (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
+ SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
+ SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
+ SET_DMA_PL(p_init->pl) | /* peripheral location */ \
+ SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
+ SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
+ SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
+ SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
+ SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
+ SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
+ SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
+ SET_DMA_TCD(p_init->tcd_disable) | /* TC chain mode disable */ \
+ SET_DMA_ECE(p_init->ece_enable) | /* ECE chain mode enable */ \
+ SET_DMA_CH(p_init->ch_enable) | /* Chain enable */ \
+ DMA_CR_MB0 /* must be one */)
+
+#define GET_DMA_POLARITY(chan) chan
+
+#endif
+
+typedef struct {
+ unsigned short in_use; /* set when channel is being used, clr when
+ * available.
+ */
+ /*
+ * Valid polarity settings:
+ * DMAReq_ActiveLow(n)
+ * DMAAck_ActiveLow(n)
+ * EOT_ActiveLow(n)
+ *
+ * n is 0 to max dma chans
+ */
+ unsigned int polarity;
+
+ char buffer_enable; /* Boolean: buffer enable */
+ char tce_enable; /* Boolean: terminal count enable */
+ char etd_output; /* Boolean: eot pin is a tc output */
+ char pce; /* Boolean: parity check enable */
+
+ /*
+ * Peripheral location:
+ * INTERNAL_PERIPHERAL (UART0 on the 405GP)
+ * EXTERNAL_PERIPHERAL
+ */
+ char pl; /* internal/external peripheral */
+
+ /*
+ * Valid pwidth settings:
+ * PW_8
+ * PW_16
+ * PW_32
+ * PW_64
+ */
+ unsigned int pwidth;
+
+ char dai; /* Boolean: dst address increment */
+ char sai; /* Boolean: src address increment */
+
+ /*
+ * Valid psc settings: 0-3
+ */
+ unsigned int psc; /* Peripheral Setup Cycles */
+
+ /*
+ * Valid pwc settings:
+ * 0-63
+ */
+ unsigned int pwc; /* Peripheral Wait Cycles */
+
+ /*
+ * Valid phc settings:
+ * 0-7
+ */
+ unsigned int phc; /* Peripheral Hold Cycles */
+
+ /*
+ * Valid cp (channel priority) settings:
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ */
+ unsigned int cp; /* channel priority */
+
+ /*
+ * Valid pf (memory read prefetch) settings:
+ *
+ * PREFETCH_1
+ * PREFETCH_2
+ * PREFETCH_4
+ */
+ unsigned int pf; /* memory read prefetch */
+
+ /*
+ * Boolean: channel interrupt enable
+ * NOTE: for sgl transfers, only the last descriptor will be setup to
+ * interrupt.
+ */
+ char int_enable;
+
+ char shift; /* easy access to byte_count shift, based on */
+ /* the width of the channel */
+
+ uint32_t control; /* channel control word */
+
+ /* These variables are used ONLY in single dma transfers */
+ unsigned int mode; /* transfer mode */
+ phys_addr_t addr;
+ char ce; /* channel enable */
+#ifdef CONFIG_STB03xxx
+ char ch_enable;
+ char tcd_disable;
+ char ece_enable;
+ char td; /* transfer direction */
+#endif
+
+ char int_on_final_sg;/* for scatter/gather - only interrupt on last sg */
+} ppc_dma_ch_t;
+
+/*
+ * PPC44x DMA implementations have a slightly different
+ * descriptor layout. Probably moved about due to the
+ * change to 64-bit addresses and link pointer. I don't
+ * know why they didn't just leave control_count after
+ * the dst_addr.
+ */
+#ifdef PPC4xx_DMA_64BIT
+typedef struct {
+ uint32_t control;
+ uint32_t control_count;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ phys_addr_t next;
+} ppc_sgl_t;
+#else
+typedef struct {
+ uint32_t control;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ uint32_t control_count;
+ uint32_t next;
+} ppc_sgl_t;
+#endif
+
+typedef struct {
+ unsigned int dmanr;
+ uint32_t control; /* channel ctrl word; loaded from each descriptor */
+ uint32_t sgl_control; /* LK, TCI, ETI, and ERI bits in sgl descriptor */
+ dma_addr_t dma_addr; /* dma (physical) address of this list */
+ ppc_sgl_t *phead;
+ dma_addr_t phead_dma;
+ ppc_sgl_t *ptail;
+ dma_addr_t ptail_dma;
+} sgl_list_info_t;
+
+typedef struct {
+ phys_addr_t *src_addr;
+ phys_addr_t *dst_addr;
+ phys_addr_t dma_src_addr;
+ phys_addr_t dma_dst_addr;
+} pci_alloc_desc_t;
+
+extern ppc_dma_ch_t dma_channels[];
+
+/*
+ * The DMA API are in ppc4xx_dma.c and ppc4xx_sgdma.c
+ */
+extern int ppc4xx_init_dma_channel(unsigned int, ppc_dma_ch_t *);
+extern int ppc4xx_get_channel_config(unsigned int, ppc_dma_ch_t *);
+extern int ppc4xx_set_channel_priority(unsigned int, unsigned int);
+extern unsigned int ppc4xx_get_peripheral_width(unsigned int);
+extern void ppc4xx_set_sg_addr(int, phys_addr_t);
+extern int ppc4xx_add_dma_sgl(sgl_handle_t, phys_addr_t, phys_addr_t, unsigned int);
+extern void ppc4xx_enable_dma_sgl(sgl_handle_t);
+extern void ppc4xx_disable_dma_sgl(sgl_handle_t);
+extern int ppc4xx_get_dma_sgl_residue(sgl_handle_t, phys_addr_t *, phys_addr_t *);
+extern int ppc4xx_delete_dma_sgl_element(sgl_handle_t, phys_addr_t *, phys_addr_t *);
+extern int ppc4xx_alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
+extern void ppc4xx_free_dma_handle(sgl_handle_t);
+extern int ppc4xx_get_dma_status(void);
+extern int ppc4xx_enable_burst(unsigned int);
+extern int ppc4xx_disable_burst(unsigned int);
+extern int ppc4xx_set_burst_size(unsigned int, unsigned int);
+extern void ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr);
+extern void ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr);
+extern void ppc4xx_enable_dma(unsigned int dmanr);
+extern void ppc4xx_disable_dma(unsigned int dmanr);
+extern void ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count);
+extern int ppc4xx_get_dma_residue(unsigned int dmanr);
+extern void ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
+ phys_addr_t dst_dma_addr);
+extern int ppc4xx_enable_dma_interrupt(unsigned int dmanr);
+extern int ppc4xx_disable_dma_interrupt(unsigned int dmanr);
+extern int ppc4xx_clr_dma_status(unsigned int dmanr);
+extern int ppc4xx_map_dma_port(unsigned int dmanr, unsigned int ocp_dma,short dma_chan);
+extern int ppc4xx_disable_dma_port(unsigned int dmanr, unsigned int ocp_dma,short dma_chan);
+extern int ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode);
+
+/* These are in kernel/dma.c: */
+
+/* reserve a DMA channel */
+extern int request_dma(unsigned int dmanr, const char *device_id);
+/* release it again */
+extern void free_dma(unsigned int dmanr);
+#endif
+#endif /* __KERNEL__ */
diff --git a/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd.c.sdiff b/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd.c.sdiff
new file mode 100644
index 00000000000..e0e9208285d
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd.c.sdiff
@@ -0,0 +1,2944 @@
+/* ========================================================== /* ==========================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers
+ * $Revision: #16 $ * $Revision: #16 $
+ * $Date: 2006/12/05 $ * $Date: 2006/12/05 $
+ * $Change: 762293 $ * $Change: 762293 $
+ * *
+ * Synopsys HS OTG Linux Software Driver and documentation (h * Synopsys HS OTG Linux Software Driver and documentation (h
+ * "Software") is an Unsupported proprietary work of Synopsys * "Software") is an Unsupported proprietary work of Synopsys
+ * otherwise expressly agreed to in writing between Synopsys * otherwise expressly agreed to in writing between Synopsys
+ * *
+ * The Software IS NOT an item of Licensed Software or Licens * The Software IS NOT an item of Licensed Software or Licens
+ * any End User Software License Agreement or Agreement for L * any End User Software License Agreement or Agreement for L
+ * with Synopsys or any supplement thereto. You are permitted * with Synopsys or any supplement thereto. You are permitted
+ * redistribute this Software in source and binary forms, wit * redistribute this Software in source and binary forms, wit
+ * modification, provided that redistributions of source code * modification, provided that redistributions of source code
+ * notice. You may not view, use, disclose, copy or distribut * notice. You may not view, use, disclose, copy or distribut
+ * any information contained herein except pursuant to this l * any information contained herein except pursuant to this l
+ * Synopsys. If you do not agree with this notice, including * Synopsys. If you do not agree with this notice, including
+ * below, then you are not authorized to use the Software. * below, then you are not authorized to use the Software.
+ * *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON A * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON A
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PA * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PA
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABL * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABL
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTER * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARI * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARI
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE PO * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE PO
+ * DAMAGE. * DAMAGE.
+ * ========================================================== * ==========================================================
+
+#ifndef CONFIG_DWC_DEVICE_ONLY | #ifndef DWC_DEVICE_ONLY
+
+/** /**
+ * @file * @file
+ * *
+ * This file contains the implementation of the HCD. In Linux * This file contains the implementation of the HCD. In Linux
+ * implements the hc_driver API. * implements the hc_driver API.
+ */ */
+#include <linux/kernel.h> #include <linux/kernel.h>
+#include <linux/module.h> #include <linux/module.h>
+#include <linux/moduleparam.h> #include <linux/moduleparam.h>
+#include <linux/init.h> #include <linux/init.h>
+#include <linux/device.h> #include <linux/device.h>
+#include <linux/errno.h> #include <linux/errno.h>
+#include <linux/list.h> #include <linux/list.h>
+#include <linux/interrupt.h> #include <linux/interrupt.h>
+#include <linux/string.h> #include <linux/string.h>
+#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
+
+#include "dwc_otg_driver.h" #include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h" #include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h" #include "dwc_otg_regs.h"
+extern atomic_t release_later; extern atomic_t release_later;
+
+static u64 dma_mask = DMA_BIT_MASK(32); static u64 dma_mask = DMA_BIT_MASK(32);
+
+static const char dwc_otg_hcd_name[] = "dwc_otg_hcd"; static const char dwc_otg_hcd_name[] = "dwc_otg_hcd";
+
+static int dwc_otg_hcd_suspend(struct usb_hcd *hcd) static int dwc_otg_hcd_suspend(struct usb_hcd *hcd)
+{ {
+ /* FIXME: Write code to right suspend processing */ /* FIXME: Write code to right suspend processing */
+ return 0; return 0;
+} }
+
+static int dwc_otg_hcd_resume(struct usb_hcd *hcd) static int dwc_otg_hcd_resume(struct usb_hcd *hcd)
+{ {
+ /* FIXME: Write code to right resume processing */ /* FIXME: Write code to right resume processing */
+ return 0; return 0;
+} }
+
+static const struct hc_driver dwc_otg_hc_driver = static const struct hc_driver dwc_otg_hc_driver =
+{ {
+ .description = dwc_otg_hcd_name, .description = dwc_otg_hcd_name,
+ .product_desc = "DWC OTG Controller", .product_desc = "DWC OTG Controller",
+ .hcd_priv_size = sizeof(dwc_otg_hcd_t), .hcd_priv_size = sizeof(dwc_otg_hcd_t),
+ .irq = dwc_otg_hcd_irq, .irq = dwc_otg_hcd_irq,
+ .flags = HCD_MEMORY | HCD_USB2, .flags = HCD_MEMORY | HCD_USB2,
+ //.reset = | //.reset =
+ .start = dwc_otg_hcd_start, | .start = dwc_otg_hcd_start,
+#ifdef CONFIG_PM #ifdef CONFIG_PM
+ .bus_suspend = dwc_otg_hcd_suspend, .bus_suspend = dwc_otg_hcd_suspend,
+ .bus_resume = dwc_otg_hcd_resume, | .bus_resume = dwc_otg_hcd_resume,
+#endif #endif
+ .stop = dwc_otg_hcd_stop, | .stop = dwc_otg_hcd_stop,
+ .urb_enqueue = dwc_otg_hcd_urb_enqueue, .urb_enqueue = dwc_otg_hcd_urb_enqueue,
+ .urb_dequeue = dwc_otg_hcd_urb_dequeue, .urb_dequeue = dwc_otg_hcd_urb_dequeue,
+ .endpoint_disable = dwc_otg_hcd_endpoint_disable, .endpoint_disable = dwc_otg_hcd_endpoint_disable,
+ .get_frame_number = dwc_otg_hcd_get_frame_number, .get_frame_number = dwc_otg_hcd_get_frame_number,
+ .hub_status_data = dwc_otg_hcd_hub_status_data, .hub_status_data = dwc_otg_hcd_hub_status_data,
+ .hub_control = dwc_otg_hcd_hub_control, .hub_control = dwc_otg_hcd_hub_control,
+ //.hub_suspend = | //.hub_suspend =
+ //.hub_resume = | //.hub_resume =
+}; };
+
+
+/** /**
+ * Work queue function for starting the HCD when A-Cable is c * Work queue function for starting the HCD when A-Cable is c
+ * The dwc_otg_hcd_start() must be called in a process contex * The dwc_otg_hcd_start() must be called in a process contex
+ */ */
+static void hcd_start_func(struct work_struct *work) static void hcd_start_func(struct work_struct *work)
+{ {
+ struct dwc_otg_hcd *priv = struct dwc_otg_hcd *priv =
+ container_of(work, struct dwc_otg_hcd, start_ container_of(work, struct dwc_otg_hcd, start_
+ struct usb_hcd *usb_hcd = (struct usb_hcd *)priv->_p; struct usb_hcd *usb_hcd = (struct usb_hcd *)priv->_p;
+ DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd) DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd)
+ if (usb_hcd) { if (usb_hcd) {
+ dwc_otg_hcd_start(usb_hcd); dwc_otg_hcd_start(usb_hcd);
+ } }
+} }
+
+
+/** /**
+ * HCD Callback function for starting the HCD when A-Cable is * HCD Callback function for starting the HCD when A-Cable is
+ * connected. * connected.
+ * *
+ * @param _p void pointer to the <code>struct usb_hcd</code> * @param _p void pointer to the <code>struct usb_hcd</code>
+ */ */
+static int32_t dwc_otg_hcd_start_cb(void *_p) static int32_t dwc_otg_hcd_start_cb(void *_p)
+{ {
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p); dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p);
+ dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if; dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if;
+ hprt0_data_t hprt0; hprt0_data_t hprt0;
+ if (core_if->op_state == B_HOST) { if (core_if->op_state == B_HOST) {
+ /* /*
+ * Reset the port. During a HNP mode switch the * Reset the port. During a HNP mode switch the
+ * needs to occur within 1ms and have a duration * needs to occur within 1ms and have a duration
+ * least 50ms. * least 50ms.
+ */ */
+ hprt0.d32 = dwc_otg_read_hprt0(core_if); hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtrst = 1; hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt dwc_write_reg32(core_if->host_if->hprt0, hprt
+ ((struct usb_hcd *)_p)->self.is_b_host = 1; ((struct usb_hcd *)_p)->self.is_b_host = 1;
+ } else { } else {
+ ((struct usb_hcd *)_p)->self.is_b_host = 0; ((struct usb_hcd *)_p)->self.is_b_host = 0;
+ } }
+ /* Need to start the HCD in a non-interrupt context. /* Need to start the HCD in a non-interrupt context.
+ INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func); INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
+ dwc_otg_hcd->_p = _p; dwc_otg_hcd->_p = _p;
+ schedule_work(&dwc_otg_hcd->start_work); schedule_work(&dwc_otg_hcd->start_work);
+ return 1; return 1;
+} }
+
+
+/** /**
+ * HCD Callback function for stopping the HCD. * HCD Callback function for stopping the HCD.
+ * *
+ * @param _p void pointer to the <code>struct usb_hcd</code> * @param _p void pointer to the <code>struct usb_hcd</code>
+ */ */
+static int32_t dwc_otg_hcd_stop_cb(void *_p) static int32_t dwc_otg_hcd_stop_cb(void *_p)
+{ {
+ struct usb_hcd *usb_hcd = (struct usb_hcd *)_p; struct usb_hcd *usb_hcd = (struct usb_hcd *)_p;
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p); DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+ dwc_otg_hcd_stop(usb_hcd); dwc_otg_hcd_stop(usb_hcd);
+ return 1; return 1;
+} }
+static void del_xfer_timers(dwc_otg_hcd_t * _hcd) static void del_xfer_timers(dwc_otg_hcd_t * _hcd)
+{ {
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ int i; int i;
+ int num_channels = _hcd->core_if->core_params->host_c int num_channels = _hcd->core_if->core_params->host_c
+ for (i = 0; i < num_channels; i++) { for (i = 0; i < num_channels; i++) {
+ del_timer(&_hcd->core_if->hc_xfer_timer[i]); del_timer(&_hcd->core_if->hc_xfer_timer[i]);
+ } }
+
+#endif /* */ #endif /* */
+} }
+static void del_timers(dwc_otg_hcd_t * _hcd) static void del_timers(dwc_otg_hcd_t * _hcd)
+{ {
+ del_xfer_timers(_hcd); del_xfer_timers(_hcd);
+ del_timer(&_hcd->conn_timer); del_timer(&_hcd->conn_timer);
+} }
+
+/** /**
+ * Processes all the URBs in a single list of QHs. Completes * Processes all the URBs in a single list of QHs. Completes
+ * -ETIMEDOUT and frees the QTD. * -ETIMEDOUT and frees the QTD.
+ */ */
+static void kill_urbs_in_qh_list(dwc_otg_hcd_t * _hcd, static void kill_urbs_in_qh_list(dwc_otg_hcd_t * _hcd,
+ struct list_head *_qh_list) struct list_head *_qh_list)
+{ {
+ struct list_head *qh_item; struct list_head *qh_item;
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+ struct list_head *qtd_item; struct list_head *qtd_item;
+ dwc_otg_qtd_t * qtd; dwc_otg_qtd_t * qtd;
+ list_for_each(qh_item, _qh_list) { list_for_each(qh_item, _qh_list) {
+ qh = list_entry(qh_item, dwc_otg_qh_t, qh_lis qh = list_entry(qh_item, dwc_otg_qh_t, qh_lis
+ for (qtd_item = qh->qtd_list.next; qtd_item ! for (qtd_item = qh->qtd_list.next; qtd_item !
+ qtd_item = qh->qtd_list.next) { qtd_item = qh->qtd_list.next) {
+ qtd = list_entry(qtd_item, dwc_otg_qt qtd = list_entry(qtd_item, dwc_otg_qt
+ if (qtd->urb != NULL) { if (qtd->urb != NULL) {
+ dwc_otg_hcd_complete_urb(_hcd dwc_otg_hcd_complete_urb(_hcd
+ } }
+ dwc_otg_hcd_qtd_remove_and_free(qtd); dwc_otg_hcd_qtd_remove_and_free(qtd);
+ } }
+ } }
+} }
+
+/** /**
+ * Responds with an error status of ETIMEDOUT to all URBs in * Responds with an error status of ETIMEDOUT to all URBs in
+ * and periodic schedules. The QTD associated with each URB i * and periodic schedules. The QTD associated with each URB i
+ * the schedule and freed. This function may be called when a * the schedule and freed. This function may be called when a
+ * detected or when the HCD is being stopped. * detected or when the HCD is being stopped.
+ */ */
+static void kill_all_urbs(dwc_otg_hcd_t * _hcd) static void kill_all_urbs(dwc_otg_hcd_t * _hcd)
+{ {
+ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_
+ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_
+ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_inac kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_inac
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_read kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_read
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_assi kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_assi
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_queu kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_queu
+} }
+
+/** /**
+ * HCD Callback function for disconnect of the HCD. * HCD Callback function for disconnect of the HCD.
+ * *
+ * @param _p void pointer to the <code>struct usb_hcd</code> * @param _p void pointer to the <code>struct usb_hcd</code>
+ */ */
+static int32_t dwc_otg_hcd_disconnect_cb(void *_p) static int32_t dwc_otg_hcd_disconnect_cb(void *_p)
+{ {
+ gintsts_data_t intr; gintsts_data_t intr;
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p); dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p);
+
+ //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p); //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+
+ /* /*
+ * Set status flags for the hub driver. * Set status flags for the hub driver.
+ */ */
+ dwc_otg_hcd->flags.b.port_connect_status_change = 1; dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+ dwc_otg_hcd->flags.b.port_connect_status = 0; dwc_otg_hcd->flags.b.port_connect_status = 0;
+
+ /* /*
+ * Shutdown any transfers in process by clearing the Tx F * Shutdown any transfers in process by clearing the Tx F
+ * interrupt mask and status bits and disabling subsequen * interrupt mask and status bits and disabling subsequen
+ * channel interrupts. * channel interrupts.
+ */ */
+ intr.d32 = 0; intr.d32 = 0;
+ intr.b.nptxfempty = 1; intr.b.nptxfempty = 1;
+ intr.b.ptxfempty = 1; intr.b.ptxfempty = 1;
+ intr.b.hcintr = 1; intr.b.hcintr = 1;
+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_r dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_r
+ intr.d32, 0); intr.d32, 0);
+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_r dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_r
+ intr.d32, 0); intr.d32, 0);
+ del_timers(dwc_otg_hcd); del_timers(dwc_otg_hcd);
+
+ /* /*
+ * Turn off the vbus power only if the core has transitio * Turn off the vbus power only if the core has transitio
+ * mode. If still in host mode, need to keep power on to * mode. If still in host mode, need to keep power on to
+ * reconnection. * reconnection.
+ */ */
+ if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) { if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) {
+ if (dwc_otg_hcd->core_if->op_state != A_SUSPE if (dwc_otg_hcd->core_if->op_state != A_SUSPE
+ hprt0_data_t hprt0 = {.d32 = 0}; hprt0_data_t hprt0 = {.d32 = 0};
+ DWC_PRINT("Disconnect: PortPower off\ DWC_PRINT("Disconnect: PortPower off\
+ hprt0.b.prtpwr = 0; hprt0.b.prtpwr = 0;
+ dwc_write_reg32(dwc_otg_hcd->core_if- dwc_write_reg32(dwc_otg_hcd->core_if-
+ hprt0.d32); hprt0.d32);
+ } }
+ dwc_otg_disable_host_interrupts(dwc_otg_hcd-> dwc_otg_disable_host_interrupts(dwc_otg_hcd->
+ } }
+
+ /* Respond with an error status to all URBs in the schedu /* Respond with an error status to all URBs in the schedu
+ kill_all_urbs(dwc_otg_hcd); kill_all_urbs(dwc_otg_hcd);
+ if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) { if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
+ /* Clean up any host channels that were in use. * /* Clean up any host channels that were in use. *
+ int num_channels; int num_channels;
+ int i; int i;
+ dwc_hc_t * channel; dwc_hc_t * channel;
+ dwc_otg_hc_regs_t * hc_regs; dwc_otg_hc_regs_t * hc_regs;
+ hcchar_data_t hcchar; hcchar_data_t hcchar;
+ num_channels = dwc_otg_hcd->core_if->core_par num_channels = dwc_otg_hcd->core_if->core_par
+ if (!dwc_otg_hcd->core_if->dma_enable) { if (!dwc_otg_hcd->core_if->dma_enable) {
+ /* Flush out any channel requests in slav /* Flush out any channel requests in slav
+ for (i = 0; i < num_channels; i++) { for (i = 0; i < num_channels; i++) {
+ channel = dwc_otg_hcd->hc_ptr channel = dwc_otg_hcd->hc_ptr
+ if (list_empty(&channel->hc_l if (list_empty(&channel->hc_l
+ hc_regs = dwc_otg_hcd hc_regs = dwc_otg_hcd
+ hcchar.d32 = dwc_read hcchar.d32 = dwc_read
+ if (hcchar.b.chen) { if (hcchar.b.chen) {
+ hcchar.b.chen hcchar.b.chen
+ hcchar.b.chdi hcchar.b.chdi
+ hcchar.b.epdi hcchar.b.epdi
+ dwc_write_reg dwc_write_reg
+ } }
+ } }
+ } }
+ } }
+ for (i = 0; i < num_channels; i++) { for (i = 0; i < num_channels; i++) {
+ channel = dwc_otg_hcd->hc_ptr_array[i channel = dwc_otg_hcd->hc_ptr_array[i
+ if (list_empty(&channel->hc_list_entr if (list_empty(&channel->hc_list_entr
+ hc_regs = dwc_otg_hcd->core_i hc_regs = dwc_otg_hcd->core_i
+ hcchar.d32 = dwc_read_reg32(& hcchar.d32 = dwc_read_reg32(&
+ if (hcchar.b.chen) { if (hcchar.b.chen) {
+ /* Halt the channel. */ /* Halt the channel. */
+ hcchar.b.chdis = 1; hcchar.b.chdis = 1;
+ dwc_write_reg32(&hc_r dwc_write_reg32(&hc_r
+ } }
+ dwc_otg_hc_cleanup(dwc_otg_hc dwc_otg_hc_cleanup(dwc_otg_hc
+ list_add_tail(&channel->hc_li list_add_tail(&channel->hc_li
+ &dwc_otg_hcd-> &dwc_otg_hcd->
+ } }
+ } }
+ } }
+
+ /* A disconnect will end the session so the B-Device is n /* A disconnect will end the session so the B-Device is n
+ * longer a B-host. */ * longer a B-host. */
+ ((struct usb_hcd *)_p)->self.is_b_host = 0; ((struct usb_hcd *)_p)->self.is_b_host = 0;
+ return 1; return 1;
+} }
+
+/** /**
+ * Connection timeout function. An OTG host is required to d * Connection timeout function. An OTG host is required to d
+ * message if the device does not connect within 10 seconds. * message if the device does not connect within 10 seconds.
+ */ */
+void dwc_otg_hcd_connect_timeout(unsigned long _ptr) void dwc_otg_hcd_connect_timeout(unsigned long _ptr)
+{ {
+ DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)_ptr DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)_ptr
+ DWC_PRINT("Connect Timeout\n"); DWC_PRINT("Connect Timeout\n");
+ DWC_ERROR("Device Not Connected/Responding\n"); DWC_ERROR("Device Not Connected/Responding\n");
+} }
+
+/** /**
+ * Start the connection timer. An OTG host is required to di * Start the connection timer. An OTG host is required to di
+ * message if the device does not connect within 10 seconds. * message if the device does not connect within 10 seconds.
+ * timer is deleted if a port connect interrupt occurs before * timer is deleted if a port connect interrupt occurs before
+ * timer expires. * timer expires.
+ */ */
+static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t * _ static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t * _
+{ {
+ init_timer(&_hcd->conn_timer); init_timer(&_hcd->conn_timer);
+ _hcd->conn_timer.function = dwc_otg_hcd_connect_timeo _hcd->conn_timer.function = dwc_otg_hcd_connect_timeo
+ _hcd->conn_timer.data = (unsigned long)0; _hcd->conn_timer.data = (unsigned long)0;
+ _hcd->conn_timer.expires = jiffies + (HZ * 10); _hcd->conn_timer.expires = jiffies + (HZ * 10);
+ add_timer(&_hcd->conn_timer); add_timer(&_hcd->conn_timer);
+} }
+
+/** /**
+ * HCD Callback function for disconnect of the HCD. * HCD Callback function for disconnect of the HCD.
+ * *
+ * @param _p void pointer to the <code>struct usb_hcd</code> * @param _p void pointer to the <code>struct usb_hcd</code>
+ */ */
+static int32_t dwc_otg_hcd_session_start_cb(void *_p) static int32_t dwc_otg_hcd_session_start_cb(void *_p)
+{ {
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p); dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p);
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p); DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+ dwc_otg_hcd_start_connect_timer(dwc_otg_hcd); dwc_otg_hcd_start_connect_timer(dwc_otg_hcd);
+ return 1; return 1;
+} }
+
+
+/** /**
+ * HCD Callback structure for handling mode switching. * HCD Callback structure for handling mode switching.
+ */ */
+static dwc_otg_cil_callbacks_t hcd_cil_callbacks = static dwc_otg_cil_callbacks_t hcd_cil_callbacks =
+{ {
+ .start = dwc_otg_hcd_start_cb, .start = dwc_otg_hcd_start_cb,
+ .stop = dwc_otg_hcd_stop_cb, .stop = dwc_otg_hcd_stop_cb,
+ .disconnect = dwc_otg_hcd_disconnect_cb, .disconnect = dwc_otg_hcd_disconnect_cb,
+ .session_start = dwc_otg_hcd_session_start_cb, .session_start = dwc_otg_hcd_session_start_cb,
+ .p = 0, .p = 0,
+}; };
+
+
+/** /**
+ * Reset tasklet function * Reset tasklet function
+ */ */
+static void reset_tasklet_func(unsigned long data) static void reset_tasklet_func(unsigned long data)
+{ {
+ dwc_otg_hcd_t * dwc_otg_hcd = (dwc_otg_hcd_t *) data; dwc_otg_hcd_t * dwc_otg_hcd = (dwc_otg_hcd_t *) data;
+ dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if; dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if;
+ hprt0_data_t hprt0; hprt0_data_t hprt0;
+ DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n"); DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if); hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtrst = 1; hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ mdelay(60); mdelay(60);
+ hprt0.b.prtrst = 0; hprt0.b.prtrst = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ dwc_otg_hcd->flags.b.port_reset_change = 1; dwc_otg_hcd->flags.b.port_reset_change = 1;
+ return; return;
+} }
+static struct tasklet_struct reset_tasklet = static struct tasklet_struct reset_tasklet =
+{ {
+ .next = NULL, .next = NULL,
+ .state = 0, .state = 0,
+ .count = ATOMIC_INIT(0), .count = ATOMIC_INIT(0),
+ .func = reset_tasklet_func, .func = reset_tasklet_func,
+ .data = 0, .data = 0,
+}; };
+
+
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET | #ifdef OTG_PLB_DMA_TASKLET
+/** /**
+ * plbdma tasklet function * plbdma tasklet function
+ */ */
+static void plbdma_tasklet_func(unsigned long data) static void plbdma_tasklet_func(unsigned long data)
+{ {
+ unsigned long flags; unsigned long flags;
+ dwc_otg_core_if_t * _core_if = (dwc_otg_core_if_t *) data dwc_otg_core_if_t * _core_if = (dwc_otg_core_if_t *) data
+ dma_xfer_t * dma_xfer = &_core_if->dma_xfer; dma_xfer_t * dma_xfer = &_core_if->dma_xfer;
+
+ local_irq_save(flags); local_irq_save(flags);
+ DWC_DEBUGPL(DBG_SP, "Plbdma tasklet called\n"); DWC_DEBUGPL(DBG_SP, "Plbdma tasklet called\n");
+
+ if (_core_if->dma_xfer.dma_dir == OTG_TX_DMA) { if (_core_if->dma_xfer.dma_dir == OTG_TX_DMA) {
+ if ((((unsigned long)dma_xfer->dma_data_buff) & 0 if ((((unsigned long)dma_xfer->dma_data_buff) & 0
+ /* call tx_dma - src,dest,len,intr */ /* call tx_dma - src,dest,len,intr */
+ ppc4xx_start_plb_dma(_core_if, (void *)dma_xf ppc4xx_start_plb_dma(_core_if, (void *)dma_xf
+ dma_xfer->dma_data_fifo, (dm dma_xfer->dma_data_fifo, (dm
+ PLB_DMA_CH, OTG_TX_DMA); PLB_DMA_CH, OTG_TX_DMA);
+ } else { } else {
+ ppc4xx_start_plb_dma(_core_if, (void *)get_unalig ppc4xx_start_plb_dma(_core_if, (void *)get_unalig
+ dma_xfer->dma_data_fifo, (dma dma_xfer->dma_data_fifo, (dma
+ PLB_DMA_CH, OTG_TX_DMA); PLB_DMA_CH, OTG_TX_DMA);
+ } }
+ } }
+ else { else {
+ DWC_DEBUGPL(DBG_HCD, "0x%p 0x%p %d\n", (void *)dm DWC_DEBUGPL(DBG_HCD, "0x%p 0x%p %d\n", (void *)dm
+ dma_xfer->dma_data_buff, dma_xfer->d dma_xfer->dma_data_buff, dma_xfer->d
+
+ ppc4xx_start_plb_dma(_core_if, (void *)dma_xf ppc4xx_start_plb_dma(_core_if, (void *)dma_xf
+ dma_xfer->dma_data_buff, (dma_xfer->d dma_xfer->dma_data_buff, (dma_xfer->d
+ PLB_DMA_CH, OTG_RX_DMA); PLB_DMA_CH, OTG_RX_DMA);
+ } }
+
+ local_irq_restore(flags); local_irq_restore(flags);
+ return; return;
+} }
+static struct tasklet_struct plbdma_tasklet = static struct tasklet_struct plbdma_tasklet =
+{ {
+ .next = NULL, .next = NULL,
+ .state = 0, .state = 0,
+ .count = ATOMIC_INIT(0), .count = ATOMIC_INIT(0),
+ .func = plbdma_tasklet_func, .func = plbdma_tasklet_func,
+ .data = 0, .data = 0,
+}; };
+
+#endif #endif
+
+/** /**
+ * Initializes the HCD. This function allocates memory for an * Initializes the HCD. This function allocates memory for an
+ * static parts of the usb_hcd and dwc_otg_hcd structures. It * static parts of the usb_hcd and dwc_otg_hcd structures. It
+ * USB bus with the core and calls the hc_driver->start() fun * USB bus with the core and calls the hc_driver->start() fun
+ * a negative error on failure. * a negative error on failure.
+ */ */
+int init_hcd_usecs(dwc_otg_hcd_t *_hcd); <
+ <
+int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_de int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_de
+{ {
+ struct usb_hcd *hcd = NULL; struct usb_hcd *hcd = NULL;
+ dwc_otg_hcd_t * dwc_otg_hcd = NULL; dwc_otg_hcd_t * dwc_otg_hcd = NULL;
+ dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev); dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev);
+ int num_channels; int num_channels;
+ int i; int i;
+ dwc_hc_t * channel; dwc_hc_t * channel;
+ int retval = 0; int retval = 0;
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n"); DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
+ /* /*
+ * Allocate memory for the base HCD plus the DWC OTG HCD. * Allocate memory for the base HCD plus the DWC OTG HCD.
+ * Initialize the base HCD. * Initialize the base HCD.
+ */ */
+ hcd = usb_create_hcd(&dwc_otg_hc_driver, _dev, dev_name(_ hcd = usb_create_hcd(&dwc_otg_hc_driver, _dev, dev_name(_
+ if (hcd == NULL) { if (hcd == NULL) {
+ retval = -ENOMEM; retval = -ENOMEM;
+ goto error1; goto error1;
+ } }
+ dev_set_drvdata(_dev, dwc_otg_device); /* fscz restor dev_set_drvdata(_dev, dwc_otg_device); /* fscz restor
+ hcd->regs = otg_dev->base; hcd->regs = otg_dev->base;
+ hcd->self.otg_port = 1; hcd->self.otg_port = 1;
+
+ /* Initialize the DWC OTG HCD. */ /* Initialize the DWC OTG HCD. */
+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+ dwc_otg_hcd->core_if = otg_dev->core_if; dwc_otg_hcd->core_if = otg_dev->core_if;
+ otg_dev->hcd = dwc_otg_hcd; otg_dev->hcd = dwc_otg_hcd;
+ spin_lock_init(&dwc_otg_hcd->lock); <
+ /* Register the HCD CIL Callbacks */ /* Register the HCD CIL Callbacks */
+ dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if, dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if,
+ &hcd_cil_callbacks, hc &hcd_cil_callbacks, hc
+
+ /* Initialize the non-periodic schedule. */ /* Initialize the non-periodic schedule. */
+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive) INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive)
+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_activ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_activ
+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_defer INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_defer
+
+ /* Initialize the periodic schedule. */ /* Initialize the periodic schedule. */
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive); INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready); INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned) INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned)
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued); INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
+
+ /* /*
+ * Create a host channel descriptor for each host channel * Create a host channel descriptor for each host channel
+ * in the controller. Initialize the channel descriptor a * in the controller. Initialize the channel descriptor a
+ */ */
+ INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list); INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list);
+ num_channels = dwc_otg_hcd->core_if->core_params->hos num_channels = dwc_otg_hcd->core_if->core_params->hos
+ for (i = 0; i < num_channels; i++) { for (i = 0; i < num_channels; i++) {
+ channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNE channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNE
+ if (channel == NULL) { if (channel == NULL) {
+ retval = -ENOMEM; retval = -ENOMEM;
+ DWC_ERROR("%s: host channel allocatio DWC_ERROR("%s: host channel allocatio
+ goto error2; goto error2;
+ } }
+ memset(channel, 0, sizeof(dwc_hc_t)); memset(channel, 0, sizeof(dwc_hc_t));
+ channel->hc_num = i; channel->hc_num = i;
+ dwc_otg_hcd->hc_ptr_array[i] = channel; dwc_otg_hcd->hc_ptr_array[i] = channel;
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ init_timer(&dwc_otg_hcd->core_if->hc_xfer init_timer(&dwc_otg_hcd->core_if->hc_xfer
+#endif /* */ #endif /* */
+ DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc= DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=
+ } }
+
+ /* Initialize the Connection timeout timer. */ /* Initialize the Connection timeout timer. */
+ init_timer(&dwc_otg_hcd->conn_timer); init_timer(&dwc_otg_hcd->conn_timer);
+
+ /* Initialize reset tasklet. */ /* Initialize reset tasklet. */
+ reset_tasklet.data = (unsigned long)dwc_otg_hcd; reset_tasklet.data = (unsigned long)dwc_otg_hcd;
+ dwc_otg_hcd->reset_tasklet = &reset_tasklet; dwc_otg_hcd->reset_tasklet = &reset_tasklet;
+
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET | #ifdef OTG_PLB_DMA_TASKLET
+ /* Initialize plbdma tasklet. */ /* Initialize plbdma tasklet. */
+ plbdma_tasklet.data = (unsigned long)dwc_otg_hcd->core_if plbdma_tasklet.data = (unsigned long)dwc_otg_hcd->core_if
+ dwc_otg_hcd->core_if->plbdma_tasklet = &plbdma_tasklet; dwc_otg_hcd->core_if->plbdma_tasklet = &plbdma_tasklet;
+#endif #endif
+
+ /* Set device flags indicating whether the HCD suppor /* Set device flags indicating whether the HCD suppor
+ if (otg_dev->core_if->dma_enable) { if (otg_dev->core_if->dma_enable) {
+ DWC_PRINT("Using DMA mode\n"); DWC_PRINT("Using DMA mode\n");
+ _dev->dma_mask = &dma_mask; _dev->dma_mask = &dma_mask;
+ _dev->coherent_dma_mask = DMA_BIT_MASK(32); _dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ } else { } else {
+ DWC_PRINT("Using Slave mode\n"); DWC_PRINT("Using Slave mode\n");
+ _dev->dma_mask = (void *)0; _dev->dma_mask = (void *)0;
+ _dev->coherent_dma_mask = 0; _dev->coherent_dma_mask = 0;
+ } }
+ <
+ init_hcd_usecs(dwc_otg_hcd); <
+ <
+ /* /*
+ * Finish generic HCD initialization and start the HC * Finish generic HCD initialization and start the HC
+ * allocates the DMA buffer pool, registers the USB b * allocates the DMA buffer pool, registers the USB b
+ * IRQ line, and calls dwc_otg_hcd_start method. * IRQ line, and calls dwc_otg_hcd_start method.
+ */ */
+ retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED); retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED);
+ if (retval < 0) { if (retval < 0) {
+ goto error2; goto error2;
+ } }
+ /* /*
+ * Allocate space for storing data on status transactions * Allocate space for storing data on status transactions
+ * data is sent, but this space acts as a bit bucket. Thi * data is sent, but this space acts as a bit bucket. Thi
+ * done after usb_add_hcd since that function allocates t * done after usb_add_hcd since that function allocates t
+ * pool. * pool.
+ */ */
+ if (otg_dev->core_if->dma_enable) { if (otg_dev->core_if->dma_enable) {
+ dwc_otg_hcd->status_buf = dwc_otg_hcd->status_buf =
+ dma_alloc_coherent(_dev, DWC_OTG_HCD_STAT dma_alloc_coherent(_dev, DWC_OTG_HCD_STAT
+ &dwc_otg_hcd->status_ &dwc_otg_hcd->status_
+ } else { } else {
+ dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD
+ } }
+ if (dwc_otg_hcd->status_buf == NULL) { if (dwc_otg_hcd->status_buf == NULL) {
+ retval = -ENOMEM; retval = -ENOMEM;
+ DWC_ERROR("%s: status_buf allocation failed\n DWC_ERROR("%s: status_buf allocation failed\n
+ goto error3; goto error3;
+ } }
+ DWC_DEBUGPL(DBG_HCD, DWC_DEBUGPL(DBG_HCD,
+ "DWC OTG HCD Initialized HCD, usbbus=%d | "DWC OTG HCD Initialized HCD, bus=%s, u
+ hcd->self.busnum); | _dev->bus_id, hcd->self.busnum);
+ return 0; return 0;
+
+ /* Error conditions */ /* Error conditions */
+ error3:usb_remove_hcd(hcd); error3:usb_remove_hcd(hcd);
+ error2:dwc_otg_hcd_free(hcd); error2:dwc_otg_hcd_free(hcd);
+ usb_put_hcd(hcd); usb_put_hcd(hcd);
+ error1:return retval; error1:return retval;
+} }
+
+
+/** /**
+ * Removes the HCD. * Removes the HCD.
+ * Frees memory and resources associated with the HCD and der * Frees memory and resources associated with the HCD and der
+ */ */
+void dwc_otg_hcd_remove(struct device *_dev) void dwc_otg_hcd_remove(struct device *_dev)
+{ {
+ dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev); dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_hcd_t * dwc_otg_hcd = otg_dev->hcd; dwc_otg_hcd_t * dwc_otg_hcd = otg_dev->hcd;
+ struct usb_hcd *hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd) struct usb_hcd *hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd)
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n"); DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n");
+
+ /* Turn off all interrupts */ /* Turn off all interrupts */
+ dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs-> dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->
+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_r dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_r
+ usb_remove_hcd(hcd); usb_remove_hcd(hcd);
+ dwc_otg_hcd_free(hcd); dwc_otg_hcd_free(hcd);
+ usb_put_hcd(hcd); usb_put_hcd(hcd);
+ return; return;
+} }
+
+
+/* ========================================================== /* ==========================================================
+ * Linux HC Driver Functions * Linux HC Driver Functions
+ * ========================================================== * ==========================================================
+
+/** /**
+ * Initializes dynamic portions of the DWC_otg HCD state. * Initializes dynamic portions of the DWC_otg HCD state.
+ */ */
+static void hcd_reinit(dwc_otg_hcd_t * _hcd) static void hcd_reinit(dwc_otg_hcd_t * _hcd)
+{ {
+ struct list_head *item; struct list_head *item;
+ int num_channels; int num_channels;
+ int i; int i;
+ dwc_hc_t * channel; dwc_hc_t * channel;
+ _hcd->flags.d32 = 0; _hcd->flags.d32 = 0;
+ _hcd->non_periodic_qh_ptr = &_hcd->non_periodic_sched _hcd->non_periodic_qh_ptr = &_hcd->non_periodic_sched
+ _hcd->available_host_channels = _hcd->core_if->core_p | _hcd->non_periodic_channels = 0;
+ > _hcd->periodic_channels = 0;
+
+ /* /*
+ * Put all channels in the free channel list and clean up * Put all channels in the free channel list and clean up
+ * states. * states.
+ */ */
+ item = _hcd->free_hc_list.next; item = _hcd->free_hc_list.next;
+ while (item != &_hcd->free_hc_list) { while (item != &_hcd->free_hc_list) {
+ list_del(item); list_del(item);
+ item = _hcd->free_hc_list.next; item = _hcd->free_hc_list.next;
+ } }
+ num_channels = _hcd->core_if->core_params->host_chann num_channels = _hcd->core_if->core_params->host_chann
+ for (i = 0; i < num_channels; i++) { for (i = 0; i < num_channels; i++) {
+ channel = _hcd->hc_ptr_array[i]; channel = _hcd->hc_ptr_array[i];
+ list_add_tail(&channel->hc_list_entry, &_hcd- list_add_tail(&channel->hc_list_entry, &_hcd-
+ dwc_otg_hc_cleanup(_hcd->core_if, channel); dwc_otg_hc_cleanup(_hcd->core_if, channel);
+ } }
+
+ /* Initialize the DWC core for host mode operation. */ /* Initialize the DWC core for host mode operation. */
+ dwc_otg_core_host_init(_hcd->core_if); dwc_otg_core_host_init(_hcd->core_if);
+} }
+
+
+/** Initializes the DWC_otg controller and its root hub and p /** Initializes the DWC_otg controller and its root hub and p
+ * mode operation. Activates the root port. Returns 0 on succ * mode operation. Activates the root port. Returns 0 on succ
+ * error code on failure. */ * error code on failure. */
+int dwc_otg_hcd_start(struct usb_hcd *_hcd) int dwc_otg_hcd_start(struct usb_hcd *_hcd)
+{ {
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd
+ //dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if; //dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if;
+ struct usb_device *udev; struct usb_device *udev;
+ struct usb_bus *bus; struct usb_bus *bus;
+
+// int retval; // int retval;
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n"); DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n");
+ bus = hcd_to_bus(_hcd); bus = hcd_to_bus(_hcd);
+
+ /* Initialize the bus state. If the core is in Device Mo /* Initialize the bus state. If the core is in Device Mo
+ * HALT the USB bus and return. */ * HALT the USB bus and return. */
+
+ _hcd->state = HC_STATE_RUNNING; _hcd->state = HC_STATE_RUNNING;
+
+ /* Initialize and connect root hub if one is not already /* Initialize and connect root hub if one is not already
+ if (bus->root_hub) { if (bus->root_hub) {
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hu DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hu
+
+ /* Inform the HUB driver to resume. */ /* Inform the HUB driver to resume. */
+ usb_hcd_resume_root_hub(_hcd); usb_hcd_resume_root_hub(_hcd);
+ } }
+
+ else { else {
+ udev = usb_alloc_dev(NULL, bus, 0); udev = usb_alloc_dev(NULL, bus, 0);
+ udev->speed = USB_SPEED_HIGH; udev->speed = USB_SPEED_HIGH;
+ if (!udev) { if (!udev) {
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Err DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Err
+ return -ENODEV; return -ENODEV;
+ } }
+
+ /* Not needed - VJ /* Not needed - VJ
+ if ((retval = usb_hcd_register_root_hub(udev, if ((retval = usb_hcd_register_root_hub(udev,
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error re DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error re
+ return -ENODEV; return -ENODEV;
+ } }
+ */ */
+ } }
+ hcd_reinit(dwc_otg_hcd); hcd_reinit(dwc_otg_hcd);
+ return 0; return 0;
+} }
+static void qh_list_free(dwc_otg_hcd_t * _hcd, struct list_he static void qh_list_free(dwc_otg_hcd_t * _hcd, struct list_he
+{ {
+ struct list_head *item; struct list_head *item;
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+ if (_qh_list->next == NULL) { if (_qh_list->next == NULL) {
+ /* The list hasn't been initialized yet. */ /* The list hasn't been initialized yet. */
+ return; return;
+ } }
+
+ /* Ensure there are no QTDs or URBs left. */ | /* Ensure there are no QTDs or URBs left. */
+ kill_urbs_in_qh_list(_hcd, _qh_list); | kill_urbs_in_qh_list(_hcd, _qh_list);
+ for (item = _qh_list->next; item != _qh_list; item = for (item = _qh_list->next; item != _qh_list; item =
+ qh = list_entry(item, dwc_otg_qh_t, qh_list_e qh = list_entry(item, dwc_otg_qh_t, qh_list_e
+ dwc_otg_hcd_qh_remove_and_free(_hcd, qh); dwc_otg_hcd_qh_remove_and_free(_hcd, qh);
+ } }
+} }
+
+
+/** /**
+ * Halts the DWC_otg host mode operations in a clean manner. * Halts the DWC_otg host mode operations in a clean manner.
+ * stopped. * stopped.
+ */ */
+void dwc_otg_hcd_stop(struct usb_hcd *_hcd) void dwc_otg_hcd_stop(struct usb_hcd *_hcd)
+{ {
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd
+ hprt0_data_t hprt0 = {.d32 = 0}; hprt0_data_t hprt0 = {.d32 = 0};
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n"); DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n");
+
+ /* Turn off all host-specific interrupts. */ /* Turn off all host-specific interrupts. */
+ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
+
+ /* /*
+ * The root hub should be disconnected before this functi * The root hub should be disconnected before this functi
+ * The disconnect will clear the QTD lists (via ..._hcd_u * The disconnect will clear the QTD lists (via ..._hcd_u
+ * and the QH lists (via ..._hcd_endpoint_disable). * and the QH lists (via ..._hcd_endpoint_disable).
+ */ */
+
+ /* Turn off the vbus power */ /* Turn off the vbus power */
+ DWC_PRINT("PortPower off\n"); DWC_PRINT("PortPower off\n");
+ hprt0.b.prtpwr = 0; hprt0.b.prtpwr = 0;
+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0,
+ return; return;
+} }
+
+
+/** Returns the current frame number. */ /** Returns the current frame number. */
+int dwc_otg_hcd_get_frame_number(struct usb_hcd *_hcd) int dwc_otg_hcd_get_frame_number(struct usb_hcd *_hcd)
+{ {
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd
+ hfnum_data_t hfnum; hfnum_data_t hfnum;
+ hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->hos hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->hos
+ host_global_regs->hfn host_global_regs->hfn
+
+#ifdef DEBUG_SOF #ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMB DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMB
+ hfnum.b.frnum); hfnum.b.frnum);
+#endif /* */ #endif /* */
+ return hfnum.b.frnum; return hfnum.b.frnum;
+} }
+
+
+/** /**
+ * Frees secondary storage associated with the dwc_otg_hcd st * Frees secondary storage associated with the dwc_otg_hcd st
+ * in the struct usb_hcd field. * in the struct usb_hcd field.
+ */ */
+void dwc_otg_hcd_free(struct usb_hcd *_hcd) void dwc_otg_hcd_free(struct usb_hcd *_hcd)
+{ {
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd
+ int i; int i;
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n"); DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n");
+ del_timers(dwc_otg_hcd); del_timers(dwc_otg_hcd);
+
+ /* Free memory for QH/QTD lists */ /* Free memory for QH/QTD lists */
+ qh_list_free(dwc_otg_hcd, qh_list_free(dwc_otg_hcd,
+ &dwc_otg_hcd->non_periodic_sched_inactive); &dwc_otg_hcd->non_periodic_sched_inactive);
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sche qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sche
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sche qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sche
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sche qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sche
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sche qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sche
+
+ /* Free memory for the host channels. */ /* Free memory for the host channels. */
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) { for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ dwc_hc_t * hc = dwc_otg_hcd->hc_ptr_array[i]; dwc_hc_t * hc = dwc_otg_hcd->hc_ptr_array[i];
+ if (hc != NULL) { if (hc != NULL) {
+ DWC_DEBUGPL(DBG_HCDV, "HCD Free chann DWC_DEBUGPL(DBG_HCDV, "HCD Free chann
+ kfree(hc); kfree(hc);
+ } }
+ } }
+ if (dwc_otg_hcd->core_if->dma_enable) { if (dwc_otg_hcd->core_if->dma_enable) {
+ if (dwc_otg_hcd->status_buf_dma) { if (dwc_otg_hcd->status_buf_dma) {
+ dma_free_coherent(_hcd->self.controll dma_free_coherent(_hcd->self.controll
+ DWC_OTG_HCD_STATUS DWC_OTG_HCD_STATUS
+ dwc_otg_hcd->statu dwc_otg_hcd->statu
+ dwc_otg_hcd->statu dwc_otg_hcd->statu
+ } }
+ } else if (dwc_otg_hcd->status_buf != NULL) { } else if (dwc_otg_hcd->status_buf != NULL) {
+ kfree(dwc_otg_hcd->status_buf); kfree(dwc_otg_hcd->status_buf);
+ } }
+ return; return;
+} }
+
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+static void dump_urb_info(struct urb *_urb, char *_fn_name) static void dump_urb_info(struct urb *_urb, char *_fn_name)
+{ {
+ DWC_PRINT("%s, urb %p\n", _fn_name, _urb); DWC_PRINT("%s, urb %p\n", _fn_name, _urb);
+ DWC_PRINT(" Device address: %d\n", usb_pipedevice(_u DWC_PRINT(" Device address: %d\n", usb_pipedevice(_u
+ DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(_u DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(_u
+ (usb_pipein(_urb->pipe) ? "IN" : "OUT")); (usb_pipein(_urb->pipe) ? "IN" : "OUT"));
+ DWC_PRINT(" Endpoint type: %s\n", ( { DWC_PRINT(" Endpoint type: %s\n", ( {
+ char *pipetype; char *pipetype;
+ switch (usb_pipetype(_urb->pipe)) { switch (usb_pipetype(_urb->pipe)) {
+ case PIPE_CONTROL: case PIPE_CONTROL:
+ pipetype = "CONTROL"; break; pipetype = "CONTROL"; break;
+ case PIPE_BULK: case PIPE_BULK:
+ pipetype = "BULK"; break; pipetype = "BULK"; break;
+ case PIPE_INTERRUPT: case PIPE_INTERRUPT:
+ pipetype = "INTERRUPT"; break pipetype = "INTERRUPT"; break
+ case PIPE_ISOCHRONOUS: case PIPE_ISOCHRONOUS:
+ pipetype = "ISOCHRONOUS"; bre pipetype = "ISOCHRONOUS"; bre
+ default: default:
+ pipetype = "UNKNOWN"; break; pipetype = "UNKNOWN"; break;
+ }; };
+ pipetype; pipetype;
+ } )) ; } )) ;
+ DWC_PRINT(" Speed: %s\n", ( { DWC_PRINT(" Speed: %s\n", ( {
+ char *speed; char *speed;
+ switch (_urb->dev->speed) { switch (_urb->dev->speed) {
+ case USB_SPEED_HIGH: case USB_SPEED_HIGH:
+ speed = "HIGH"; break; speed = "HIGH"; break;
+ case USB_SPEED_FULL: case USB_SPEED_FULL:
+ speed = "FULL"; break; speed = "FULL"; break;
+ case USB_SPEED_LOW: case USB_SPEED_LOW:
+ speed = "LOW"; break; speed = "LOW"; break;
+ default: default:
+ speed = "UNKNOWN"; break; speed = "UNKNOWN"; break;
+ }; };
+ speed; speed;
+ } )) ; } )) ;
+ DWC_PRINT(" Max packet size: %d\n", DWC_PRINT(" Max packet size: %d\n",
+ usb_maxpacket(_urb->dev, _urb->pipe, usb_p usb_maxpacket(_urb->dev, _urb->pipe, usb_p
+ DWC_PRINT(" Data buffer length: %d\n", _urb->transfe DWC_PRINT(" Data buffer length: %d\n", _urb->transfe
+ DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n" DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n"
+ _urb->transfer_buffer, (void *)_urb->trans _urb->transfer_buffer, (void *)_urb->trans
+ DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n", _urb DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n", _urb
+ (void *)_urb->setup_dma); (void *)_urb->setup_dma);
+ DWC_PRINT(" Interval: %d\n", _urb->interval); DWC_PRINT(" Interval: %d\n", _urb->interval);
+ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) { if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i; int i;
+ for (i = 0; i < _urb->number_of_packets; i++) for (i = 0; i < _urb->number_of_packets; i++)
+ DWC_PRINT(" ISO Desc %d:\n", i); DWC_PRINT(" ISO Desc %d:\n", i);
+ DWC_PRINT(" offset: %d, length %d\ DWC_PRINT(" offset: %d, length %d\
+ _urb->iso_frame_desc[i].of _urb->iso_frame_desc[i].of
+ _urb->iso_frame_desc[i].le _urb->iso_frame_desc[i].le
+ } }
+ } }
+} }
+static void dump_channel_info(dwc_otg_hcd_t * _hcd, dwc_otg_ static void dump_channel_info(dwc_otg_hcd_t * _hcd, dwc_otg_
+{ {
+ if (qh->channel != NULL) { if (qh->channel != NULL) {
+ dwc_hc_t * hc = qh->channel; dwc_hc_t * hc = qh->channel;
+ struct list_head *item; struct list_head *item;
+ dwc_otg_qh_t * qh_item; dwc_otg_qh_t * qh_item;
+ int num_channels = _hcd->core_if->core_params int num_channels = _hcd->core_if->core_params
+ int i; int i;
+ dwc_otg_hc_regs_t * hc_regs; dwc_otg_hc_regs_t * hc_regs;
+ hcchar_data_t hcchar; hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt; hcsplt_data_t hcsplt;
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ uint32_t hcdma; uint32_t hcdma;
+ hc_regs = _hcd->core_if->host_if->hc_regs[hc- hc_regs = _hcd->core_if->host_if->hc_regs[hc-
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar) hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar)
+ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt) hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt)
+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz) hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz)
+ hcdma = dwc_read_reg32(&hc_regs->hcdma); hcdma = dwc_read_reg32(&hc_regs->hcdma);
+ DWC_PRINT(" Assigned to channel %p:\n", hc); DWC_PRINT(" Assigned to channel %p:\n", hc);
+ DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n
+ DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n" DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n"
+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_i DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_i
+ hc->dev_addr, hc->ep_num, hc->ep_i hc->dev_addr, hc->ep_num, hc->ep_i
+ DWC_PRINT(" ep_type: %d\n", hc->ep_type); DWC_PRINT(" ep_type: %d\n", hc->ep_type);
+ DWC_PRINT(" max_packet: %d\n", hc->max_pac DWC_PRINT(" max_packet: %d\n", hc->max_pac
+ DWC_PRINT(" data_pid_start: %d\n", hc->dat DWC_PRINT(" data_pid_start: %d\n", hc->dat
+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_ DWC_PRINT(" xfer_started: %d\n", hc->xfer_
+ DWC_PRINT(" halt_status: %d\n", hc->halt_s DWC_PRINT(" halt_status: %d\n", hc->halt_s
+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buf DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buf
+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len) DWC_PRINT(" xfer_len: %d\n", hc->xfer_len)
+ DWC_PRINT(" qh: %p\n", hc->qh); DWC_PRINT(" qh: %p\n", hc->qh);
+ DWC_PRINT(" NP inactive sched:\n"); DWC_PRINT(" NP inactive sched:\n");
+ list_for_each(item, &_hcd->non_periodic_sched list_for_each(item, &_hcd->non_periodic_sched
+ qh_item = list_entry(item, dwc_otg_qh qh_item = list_entry(item, dwc_otg_qh
+ DWC_PRINT(" %p\n", qh_item); DWC_PRINT(" %p\n", qh_item);
+ } DWC_PRINT(" NP active sched:\n"); } DWC_PRINT(" NP active sched:\n");
+ list_for_each(item, &_hcd->non_periodic_sched list_for_each(item, &_hcd->non_periodic_sched
+ qh_item = list_entry(item, dwc_otg_qh qh_item = list_entry(item, dwc_otg_qh
+ DWC_PRINT(" %p\n", qh_item); DWC_PRINT(" %p\n", qh_item);
+ } DWC_PRINT(" NP deferred sched:\n"); } DWC_PRINT(" NP deferred sched:\n");
+ list_for_each(item, &_hcd->non_periodic_sched list_for_each(item, &_hcd->non_periodic_sched
+ qh_item = list_entry(item, dwc_otg_qh qh_item = list_entry(item, dwc_otg_qh
+ DWC_PRINT(" %p\n", qh_item); DWC_PRINT(" %p\n", qh_item);
+ } DWC_PRINT(" Channels: \n"); } DWC_PRINT(" Channels: \n");
+ for (i = 0; i < num_channels; i++) { for (i = 0; i < num_channels; i++) {
+ dwc_hc_t * hc = _hcd->hc_ptr_array[i] dwc_hc_t * hc = _hcd->hc_ptr_array[i]
+ DWC_PRINT(" %2d: %p\n", i, hc); DWC_PRINT(" %2d: %p\n", i, hc);
+ } }
+ } }
+} }
+
+#endif /* */ #endif /* */
+
+/** Starts processing a USB transfer request specified by a U /** Starts processing a USB transfer request specified by a U
+ * (URB). mem_flags indicates the type of memory allocation t * (URB). mem_flags indicates the type of memory allocation t
+ * processing this URB. */ * processing this URB. */
+int dwc_otg_hcd_urb_enqueue(struct usb_hcd *_hcd, int dwc_otg_hcd_urb_enqueue(struct usb_hcd *_hcd,
+ struct urb *_urb, struct urb *_urb,
+ gfp_t _mem_flags) gfp_t _mem_flags)
+{ {
+ unsigned long flags; unsigned long flags;
+ int retval; int retval;
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd
+ dwc_otg_qtd_t * qtd; dwc_otg_qtd_t * qtd;
+
+ local_irq_save(flags); local_irq_save(flags);
+ retval = usb_hcd_link_urb_to_ep(_hcd, _urb); retval = usb_hcd_link_urb_to_ep(_hcd, _urb);
+ if (retval) { if (retval) {
+ local_irq_restore(flags); local_irq_restore(flags);
+ return retval; return retval;
+ } }
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ dump_urb_info(_urb, "dwc_otg_hcd_urb_enqueue" dump_urb_info(_urb, "dwc_otg_hcd_urb_enqueue"
+ } }
+#endif /* */ #endif /* */
+ if (!dwc_otg_hcd->flags.b.port_connect_status) { if (!dwc_otg_hcd->flags.b.port_connect_status) {
+ /* No longer connected. */ /* No longer connected. */
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb); <
+ local_irq_restore(flags); local_irq_restore(flags);
+ return -ENODEV; return -ENODEV;
+ } }
+ qtd = dwc_otg_hcd_qtd_create(_urb); qtd = dwc_otg_hcd_qtd_create(_urb);
+ if (qtd == NULL) { if (qtd == NULL) {
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb); <
+ local_irq_restore(flags); local_irq_restore(flags);
+ DWC_ERROR("DWC OTG HCD URB Enqueue failed cre DWC_ERROR("DWC OTG HCD URB Enqueue failed cre
+ return -ENOMEM; return -ENOMEM;
+ } }
+ retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd); retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd);
+ if (retval < 0) { if (retval < 0) {
+ DWC_ERROR("DWC OTG HCD URB Enqueue failed add DWC_ERROR("DWC OTG HCD URB Enqueue failed add
+ "Error status %d\n", retval); "Error status %d\n", retval);
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb); <
+ dwc_otg_hcd_qtd_free(qtd); dwc_otg_hcd_qtd_free(qtd);
+ } }
+ local_irq_restore(flags); local_irq_restore(flags);
+ return retval; return retval;
+} }
+
+
+/** Aborts/cancels a USB transfer request. Always returns 0 t /** Aborts/cancels a USB transfer request. Always returns 0 t
+ * success. */ * success. */
+int dwc_otg_hcd_urb_dequeue(struct usb_hcd *_hcd, struct urb int dwc_otg_hcd_urb_dequeue(struct usb_hcd *_hcd, struct urb
+{ {
+ unsigned long flags; unsigned long flags;
+ dwc_otg_hcd_t * dwc_otg_hcd; dwc_otg_hcd_t * dwc_otg_hcd;
+ dwc_otg_qtd_t * urb_qtd; dwc_otg_qtd_t * urb_qtd;
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+ struct usb_host_endpoint *_ep = dwc_urb_to_endpoint(_ <
+ int retval; int retval;
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n"); DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
+ <
+ if (!_ep) <
+ return -EINVAL; <
+ <
+ local_irq_save(flags); local_irq_save(flags);
+ retval = usb_hcd_check_unlink_urb(_hcd, _urb, _status retval = usb_hcd_check_unlink_urb(_hcd, _urb, _status
+ if (retval) { if (retval) {
+ local_irq_restore(flags); local_irq_restore(flags);
+ return retval; return retval;
+ } }
+
+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd); dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+
+ urb_qtd = (dwc_otg_qtd_t *) _urb->hcpriv; urb_qtd = (dwc_otg_qtd_t *) _urb->hcpriv;
+ if (urb_qtd == NULL) { if (urb_qtd == NULL) {
+ printk("urb_qtd is NULL for _urb %08x\n",(uns printk("urb_qtd is NULL for _urb %08x\n",(uns
+ goto done; goto done;
+ } }
+ qh = (dwc_otg_qh_t *) urb_qtd->qtd_qh_ptr; qh = (dwc_otg_qh_t *) urb_qtd->qtd_qh_ptr;
+ if (qh == NULL) { if (qh == NULL) {
+ goto done; goto done;
+ } }
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ dump_urb_info(_urb, "dwc_otg_hcd_urb_dequeue" dump_urb_info(_urb, "dwc_otg_hcd_urb_dequeue"
+ if (urb_qtd == qh->qtd_in_process) { if (urb_qtd == qh->qtd_in_process) {
+ dump_channel_info(dwc_otg_hcd, qh); dump_channel_info(dwc_otg_hcd, qh);
+ } }
+ } }
+
+#endif /* */ #endif /* */
+ if (urb_qtd == qh->qtd_in_process) { if (urb_qtd == qh->qtd_in_process) {
+ /* The QTD is in process (it has been assigned to /* The QTD is in process (it has been assigned to
+ if (dwc_otg_hcd->flags.b.port_connect_status) { if (dwc_otg_hcd->flags.b.port_connect_status) {
+
+ /* /*
+ * If still connected (i.e. in host mode) * If still connected (i.e. in host mode)
+ * channel so it can be used for other tr * channel so it can be used for other tr
+ * no longer connected, the host register * no longer connected, the host register
+ * written to halt the channel since the * written to halt the channel since the
+ * device mode. * device mode.
+ */ */
+ dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh- dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh-
+ DWC_OTG_HC_XFER_U DWC_OTG_HC_XFER_U
+ } }
+ } }
+
+ /* /*
+ * Free the QTD and clean up the associated QH. Leave the * Free the QTD and clean up the associated QH. Leave the
+ * schedule if it has any remaining QTDs. * schedule if it has any remaining QTDs.
+ */ */
+ dwc_otg_hcd_qtd_remove_and_free(urb_qtd); dwc_otg_hcd_qtd_remove_and_free(urb_qtd);
+ if (urb_qtd == qh->qtd_in_process) { if (urb_qtd == qh->qtd_in_process) {
+ dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0) dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0)
+ qh->channel = NULL; qh->channel = NULL;
+ qh->qtd_in_process = NULL; qh->qtd_in_process = NULL;
+ } else if (list_empty(&qh->qtd_list)) { } else if (list_empty(&qh->qtd_list)) {
+ dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh); dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
+ } }
+done: done:
+ > local_irq_restore(flags);
+ _urb->hcpriv = NULL; _urb->hcpriv = NULL;
+
+ /* Higher layer software sets URB status. */ /* Higher layer software sets URB status. */
+ > #if 1 /* Fixed bug relate kernel hung when unplug cable */
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb); usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+ usb_hcd_giveback_urb(_hcd, _urb, _status); usb_hcd_giveback_urb(_hcd, _urb, _status);
+ <
+ local_irq_restore(flags); <
+ <
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ DWC_PRINT("Called usb_hcd_giveback_urb()\n"); DWC_PRINT("Called usb_hcd_giveback_urb()\n");
+ DWC_PRINT(" urb->status = %d\n", _status); DWC_PRINT(" urb->status = %d\n", _status);
+ } }
+ > #else
+ > if (_status != -ECONNRESET) {
+ > usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+ > usb_hcd_giveback_urb(_hcd, _urb, _status);
+ > if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB))
+ > DWC_PRINT("Called usb_hcd_giveback_ur
+ > DWC_PRINT(" urb->status = %d\n", _st
+ > }
+ > }
+ > #endif
+ return 0; return 0;
+} }
+
+
+/** Frees resources in the DWC_otg controller related to a gi /** Frees resources in the DWC_otg controller related to a gi
+ * clears state in the HCD related to the endpoint. Any URBs * clears state in the HCD related to the endpoint. Any URBs
+ * must already be dequeued. */ * must already be dequeued. */
+void dwc_otg_hcd_endpoint_disable(struct usb_hcd *_hcd, void dwc_otg_hcd_endpoint_disable(struct usb_hcd *_hcd,
+ struct usb_host_endpoint *_ struct usb_host_endpoint *_
+{ {
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpo DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpo
+ "endpoint=%d\n", _ep->desc.bEndpointAdd "endpoint=%d\n", _ep->desc.bEndpointAdd
+ dwc_ep_addr_to_endpoint(_ep->desc.bEndp dwc_ep_addr_to_endpoint(_ep->desc.bEndp
+ qh = (dwc_otg_qh_t *) (_ep->hcpriv); qh = (dwc_otg_qh_t *) (_ep->hcpriv);
+ if (qh != NULL) { if (qh != NULL) {
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ /** Check that the QTD list is really empty * /** Check that the QTD list is really empty *
+ if (!list_empty(&qh->qtd_list)) { if (!list_empty(&qh->qtd_list)) {
+ DWC_WARN("DWC OTG HCD EP DISABLE:" DWC_WARN("DWC OTG HCD EP DISABLE:"
+ " QTD List for this endpoin " QTD List for this endpoin
+ } }
+
+#endif /* */ #endif /* */
+ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh);
+ _ep->hcpriv = NULL; _ep->hcpriv = NULL;
+ } }
+ return; return;
+} }
+
+ > extern int fscz_debug;
+/** Handles host mode interrupts for the DWC_otg controller. /** Handles host mode interrupts for the DWC_otg controller.
+ * there was no interrupt to handle. Returns IRQ_HANDLED if t * there was no interrupt to handle. Returns IRQ_HANDLED if t
+ * interrupt. * interrupt.
+ * *
+ * This function is called by the USB core when an interrupt * This function is called by the USB core when an interrupt
+irqreturn_t dwc_otg_hcd_irq(struct usb_hcd * _hcd) irqreturn_t dwc_otg_hcd_irq(struct usb_hcd * _hcd)
+{ {
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd
+ return IRQ_RETVAL(dwc_otg_hcd_handle_intr(dwc_otg_hcd return IRQ_RETVAL(dwc_otg_hcd_handle_intr(dwc_otg_hcd
+} }
+
+/** Creates Status Change bitmap for the root hub and root po /** Creates Status Change bitmap for the root hub and root po
+ * returned in buf. Bit 0 is the status change indicator for * returned in buf. Bit 0 is the status change indicator for
+ * is the status change indicator for the single root port. R * is the status change indicator for the single root port. R
+ * change indicator is 1, otherwise returns 0. */ * change indicator is 1, otherwise returns 0. */
+int dwc_otg_hcd_hub_status_data(struct usb_hcd *_hcd, char *_ int dwc_otg_hcd_hub_status_data(struct usb_hcd *_hcd, char *_
+{ {
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd
+ _buf[0] = 0; _buf[0] = 0;
+ _buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_ _buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_
+ || dwc_otg_hcd->flags.b.port_reset_chang || dwc_otg_hcd->flags.b.port_reset_chang
+ || dwc_otg_hcd->flags.b.port_enable_chan || dwc_otg_hcd->flags.b.port_enable_chan
+ || dwc_otg_hcd->flags.b.port_suspend_cha || dwc_otg_hcd->flags.b.port_suspend_cha
+ || dwc_otg_hcd->flags.b.port_over_curren || dwc_otg_hcd->flags.b.port_over_curren
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ if (_buf[0]) { if (_buf[0]) {
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS
+ " Root port status changed\n"); " Root port status changed\n");
+ DWC_DEBUGPL(DBG_HCDV, " port_connect_status_ DWC_DEBUGPL(DBG_HCDV, " port_connect_status_
+ dwc_otg_hcd->flags.b.port_connec dwc_otg_hcd->flags.b.port_connec
+ DWC_DEBUGPL(DBG_HCDV, " port_reset_change: % DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %
+ dwc_otg_hcd->flags.b.port_reset_ dwc_otg_hcd->flags.b.port_reset_
+ DWC_DEBUGPL(DBG_HCDV, " port_enable_change: DWC_DEBUGPL(DBG_HCDV, " port_enable_change:
+ dwc_otg_hcd->flags.b.port_enable dwc_otg_hcd->flags.b.port_enable
+ DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: DWC_DEBUGPL(DBG_HCDV, " port_suspend_change:
+ dwc_otg_hcd->flags.b.port_suspen dwc_otg_hcd->flags.b.port_suspen
+ DWC_DEBUGPL(DBG_HCDV, " port_over_current_ch DWC_DEBUGPL(DBG_HCDV, " port_over_current_ch
+ dwc_otg_hcd->flags.b.port_over_c dwc_otg_hcd->flags.b.port_over_c
+ } }
+
+#endif /* */ #endif /* */
+ return (_buf[0] != 0); return (_buf[0] != 0);
+} }
+
+
+#ifdef DWC_HS_ELECT_TST #ifdef DWC_HS_ELECT_TST
+/* /*
+ * Quick and dirty hack to implement the HS Electrical Test * Quick and dirty hack to implement the HS Electrical Test
+ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature. * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
+ * *
+ * This code was copied from our userspace app "hset". It sen * This code was copied from our userspace app "hset". It sen
+ * Get Device Descriptor control sequence in two parts, first * Get Device Descriptor control sequence in two parts, first
+ * Setup packet by itself, followed some time later by the In * Setup packet by itself, followed some time later by the In
+ * Ack packets. Rather than trying to figure out how to add t * Ack packets. Rather than trying to figure out how to add t
+ * functionality to the normal driver code, we just hijack th * functionality to the normal driver code, we just hijack th
+ * hardware, using these two function to drive the hardware * hardware, using these two function to drive the hardware
+ * directly. * directly.
+ */ */
+dwc_otg_core_global_regs_t * global_regs; dwc_otg_core_global_regs_t * global_regs;
+dwc_otg_host_global_regs_t * hc_global_regs; dwc_otg_host_global_regs_t * hc_global_regs;
+dwc_otg_hc_regs_t * hc_regs; dwc_otg_hc_regs_t * hc_regs;
+uint32_t * data_fifo; uint32_t * data_fifo;
+
+static void do_setup(void) static void do_setup(void)
+{ {
+ gintsts_data_t gintsts; gintsts_data_t gintsts;
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ hcchar_data_t hcchar; hcchar_data_t hcchar;
+ haint_data_t haint; haint_data_t haint;
+ hcint_data_t hcint; hcint_data_t hcint;
+
+ /* Enable HAINTs */ /* Enable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+ /* Enable HCINTs */ /* Enable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* /*
+ * Send Setup packet (Get Device Descriptor) * Send Setup packet (Get Device Descriptor)
+ */ */
+
+ /* Make sure channel is disabled */ /* Make sure channel is disabled */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) { if (hcchar.b.chen) {
+
+ //fprintf(stderr, "Channel already enabled 1, HCC //fprintf(stderr, "Channel already enabled 1, HCC
+ hcchar.b.chdis = 1; hcchar.b.chdis = 1;
+
+// hcchar.b.chen = 1; // hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ //sleep(1); //sleep(1);
+ mdelay(1000); mdelay(1000);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gi gintsts.d32 = dwc_read_reg32(&global_regs->gi
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts. //fprintf(stderr, "GINTSTS: %08x\n", gintsts.
+
+ /* Read HAINT */ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->h haint.d32 = dwc_read_reg32(&hc_global_regs->h
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32) //fprintf(stderr, "HAINT: %08x\n", haint.d32)
+
+ /* Read HCINT */ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32) //fprintf(stderr, "HCINT: %08x\n", hcint.d32)
+
+ /* Read HCCHAR */ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar) hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar)
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d3 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d3
+
+ /* Clear HCINT */ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint dwc_write_reg32(&hc_global_regs->haint, haint
+
+ /* Clear GINTSTS */ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintst dwc_write_reg32(&global_regs->gintsts, gintst
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar) hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar)
+
+ //if (hcchar.b.chen) { //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabl // fprintf(stderr, "** Channel _still_ enabl
+ //} //}
+ } }
+
+ /* Set HCTSIZ */ /* Set HCTSIZ */
+ hctsiz.d32 = 0; hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 8; hctsiz.b.xfersize = 8;
+ hctsiz.b.pktcnt = 1; hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = DWC_OTG_HC_PID_SETUP; hctsiz.b.pid = DWC_OTG_HC_PID_SETUP;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */ /* Set HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+ hcchar.b.epdir = 0; hcchar.b.epdir = 0;
+ hcchar.b.epnum = 0; hcchar.b.epnum = 0;
+ hcchar.b.mps = 8; hcchar.b.mps = 8;
+ hcchar.b.chen = 1; hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ /* Fill FIFO with Setup data for Get Device Descripto /* Fill FIFO with Setup data for Get Device Descripto
+ data_fifo = (uint32_t *) ((char *)global_regs + 0x100 data_fifo = (uint32_t *) ((char *)global_regs + 0x100
+ dwc_write_reg32(data_fifo++, 0x01000680); dwc_write_reg32(data_fifo++, 0x01000680);
+ dwc_write_reg32(data_fifo++, 0x00080000); dwc_write_reg32(data_fifo++, 0x00080000);
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS
+
+ /* Wait for host channel interrupt */ /* Wait for host channel interrupt */
+ do { do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gi gintsts.d32 = dwc_read_reg32(&global_regs->gi
+ } while (gintsts.b.hcintr == 0); } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\
+
+ /* Disable HCINTs */ /* Disable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+ /* Disable HAINTs */ /* Disable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+ /* Read HAINT */ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+} }
+
+static void do_in_ack(void) static void do_in_ack(void)
+{ {
+ gintsts_data_t gintsts; gintsts_data_t gintsts;
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ hcchar_data_t hcchar; hcchar_data_t hcchar;
+ haint_data_t haint; haint_data_t haint;
+ hcint_data_t hcint; hcint_data_t hcint;
+ host_grxsts_data_t grxsts; host_grxsts_data_t grxsts;
+
+ /* Enable HAINTs */ /* Enable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+ /* Enable HCINTs */ /* Enable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* /*
+ * Receive Control In packet * Receive Control In packet
+ */ */
+
+ /* Make sure channel is disabled */ /* Make sure channel is disabled */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ if (hcchar.b.chen) { if (hcchar.b.chen) {
+ //fprintf(stderr, "Channel already enabled 2, //fprintf(stderr, "Channel already enabled 2,
+ hcchar.b.chdis = 1; hcchar.b.chdis = 1;
+ hcchar.b.chen = 1; hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32) dwc_write_reg32(&hc_regs->hcchar, hcchar.d32)
+
+ //sleep(1); //sleep(1);
+ mdelay(1000); mdelay(1000);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gi gintsts.d32 = dwc_read_reg32(&global_regs->gi
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts. //fprintf(stderr, "GINTSTS: %08x\n", gintsts.
+
+ /* Read HAINT */ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->h haint.d32 = dwc_read_reg32(&hc_global_regs->h
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32) //fprintf(stderr, "HAINT: %08x\n", haint.d32)
+
+ /* Read HCINT */ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32) //fprintf(stderr, "HCINT: %08x\n", hcint.d32)
+
+ /* Read HCCHAR */ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar) hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar)
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d3 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d3
+
+ /* Clear HCINT */ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint dwc_write_reg32(&hc_global_regs->haint, haint
+
+ /* Clear GINTSTS */ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintst dwc_write_reg32(&global_regs->gintsts, gintst
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar) hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar)
+
+ //if (hcchar.b.chen) { //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ e // fprintf(stderr, "** Channel _still_ e
+ //} //}
+ } }
+
+ /* Set HCTSIZ */ /* Set HCTSIZ */
+ hctsiz.d32 = 0; hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 8; hctsiz.b.xfersize = 8;
+ hctsiz.b.pktcnt = 1; hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */ /* Set HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+ hcchar.b.epdir = 1; hcchar.b.epdir = 1;
+ hcchar.b.epnum = 0; hcchar.b.epnum = 0;
+ hcchar.b.mps = 8; hcchar.b.mps = 8;
+ hcchar.b.chen = 1; hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS
+
+ /* Wait for receive status queue interrupt */ /* Wait for receive status queue interrupt */
+ do { do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gi gintsts.d32 = dwc_read_reg32(&global_regs->gi
+ } while (gintsts.b.rxstsqlvl == 0); } while (gintsts.b.rxstsqlvl == 0);
+
+ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n
+
+ /* Read RXSTS */ /* Read RXSTS */
+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+
+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+ /* Clear RXSTSQLVL in GINTSTS */ /* Clear RXSTSQLVL in GINTSTS */
+ gintsts.d32 = 0; gintsts.d32 = 0;
+ gintsts.b.rxstsqlvl = 1; gintsts.b.rxstsqlvl = 1;
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+ switch (grxsts.b.pktsts) { switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN: case DWC_GRXSTS_PKTSTS_IN:
+ /* Read the data into the host buffer */ /* Read the data into the host buffer */
+ if (grxsts.b.bcnt > 0) { if (grxsts.b.bcnt > 0) {
+ int i; int i;
+ int word_count = (grxsts.b.bcnt + 3) int word_count = (grxsts.b.bcnt + 3)
+ data_fifo = (uint32_t *) ((char *)glo data_fifo = (uint32_t *) ((char *)glo
+ for (i = 0; i < word_count; i++) { for (i = 0; i < word_count; i++) {
+ (void)dwc_read_reg32(data_fif (void)dwc_read_reg32(data_fif
+ } }
+ } }
+ //fprintf(stderr, "Received %u bytes\n", (unsigne //fprintf(stderr, "Received %u bytes\n", (unsigne
+ break; break;
+ default: default:
+ //fprintf(stderr, "** Unexpected GRXSTS packet st //fprintf(stderr, "** Unexpected GRXSTS packet st
+ break; break;
+ } }
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS
+
+ /* Wait for receive status queue interrupt */ /* Wait for receive status queue interrupt */
+ do { do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gi gintsts.d32 = dwc_read_reg32(&global_regs->gi
+ } while (gintsts.b.rxstsqlvl == 0); } while (gintsts.b.rxstsqlvl == 0);
+
+ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n
+
+ /* Read RXSTS */ /* Read RXSTS */
+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+
+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+ /* Clear RXSTSQLVL in GINTSTS */ /* Clear RXSTSQLVL in GINTSTS */
+ gintsts.d32 = 0; gintsts.d32 = 0;
+ gintsts.b.rxstsqlvl = 1; gintsts.b.rxstsqlvl = 1;
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+ switch (grxsts.b.pktsts) { switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+ break; break;
+ default: default:
+ //fprintf(stderr, "** Unexpected GRXSTS packet st //fprintf(stderr, "** Unexpected GRXSTS packet st
+ break; break;
+ } }
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = % //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %
+
+ /* Wait for host channel interrupt */ /* Wait for host channel interrupt */
+ do { do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gi gintsts.d32 = dwc_read_reg32(&global_regs->gi
+ } while (gintsts.b.hcintr == 0); } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\
+
+ /* Read HAINT */ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ // usleep(100000); // usleep(100000);
+ // mdelay(100); // mdelay(100);
+ mdelay(1); mdelay(1);
+
+ /* /*
+ * Send handshake packet * Send handshake packet
+ */ */
+
+ /* Read HAINT */ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Make sure channel is disabled */ /* Make sure channel is disabled */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) { if (hcchar.b.chen) {
+
+ //fprintf(stderr, "Channel already enabled 3, HCC //fprintf(stderr, "Channel already enabled 3, HCC
+ hcchar.b.chdis = 1; hcchar.b.chdis = 1;
+ hcchar.b.chen = 1; hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32) dwc_write_reg32(&hc_regs->hcchar, hcchar.d32)
+
+ //sleep(1); //sleep(1);
+ mdelay(1000); mdelay(1000);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gi gintsts.d32 = dwc_read_reg32(&global_regs->gi
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts. //fprintf(stderr, "GINTSTS: %08x\n", gintsts.
+
+ /* Read HAINT */ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->h haint.d32 = dwc_read_reg32(&hc_global_regs->h
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32) //fprintf(stderr, "HAINT: %08x\n", haint.d32)
+
+ /* Read HCINT */ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32) //fprintf(stderr, "HCINT: %08x\n", hcint.d32)
+
+ /* Read HCCHAR */ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar) hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar)
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d3 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d3
+
+ /* Clear HCINT */ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint dwc_write_reg32(&hc_global_regs->haint, haint
+
+ /* Clear GINTSTS */ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintst dwc_write_reg32(&global_regs->gintsts, gintst
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar) hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar)
+
+ //if (hcchar.b.chen) { //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ e // fprintf(stderr, "** Channel _still_ e
+ //} //}
+ } }
+
+ /* Set HCTSIZ */ /* Set HCTSIZ */
+ hctsiz.d32 = 0; hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 0; hctsiz.b.xfersize = 0;
+ hctsiz.b.pktcnt = 1; hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */ /* Set HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+ hcchar.b.epdir = 0; hcchar.b.epdir = 0;
+ hcchar.b.epnum = 0; hcchar.b.epnum = 0;
+ hcchar.b.mps = 8; hcchar.b.mps = 8;
+ hcchar.b.chen = 1; hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = % //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %
+
+ /* Wait for host channel interrupt */ /* Wait for host channel interrupt */
+ do { do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gi gintsts.d32 = dwc_read_reg32(&global_regs->gi
+ } while (gintsts.b.hcintr == 0); } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\
+
+ /* Disable HCINTs */ /* Disable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+ /* Disable HAINTs */ /* Disable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+ /* Read HAINT */ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+} }
+
+#endif /* DWC_HS_ELECT_TST */ #endif /* DWC_HS_ELECT_TST */
+
+/** Handles hub class-specific requests.*/ /** Handles hub class-specific requests.*/
+int dwc_otg_hcd_hub_control(struct usb_hcd *_hcd, u16 _typeRe int dwc_otg_hcd_hub_control(struct usb_hcd *_hcd, u16 _typeRe
+ u16 _wIndex, char *_buf, u16 _wLe u16 _wIndex, char *_buf, u16 _wLe
+{ {
+ int retval = 0; int retval = 0;
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd
+ dwc_otg_core_if_t * core_if = hcd_to_dwc_otg_hcd(_hcd dwc_otg_core_if_t * core_if = hcd_to_dwc_otg_hcd(_hcd
+ struct usb_hub_descriptor *desc; struct usb_hub_descriptor *desc;
+ hprt0_data_t hprt0 = {.d32 = 0}; hprt0_data_t hprt0 = {.d32 = 0};
+ uint32_t port_status; uint32_t port_status;
+ switch (_typeReq) { switch (_typeReq) {
+ case ClearHubFeature: case ClearHubFeature:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL
+ "ClearHubFeature 0x%x\n", _wValu "ClearHubFeature 0x%x\n", _wValu
+ switch (_wValue) { switch (_wValue) {
+ case C_HUB_LOCAL_POWER: case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT: case C_HUB_OVER_CURRENT:
+ /* Nothing required here */ /* Nothing required here */
+ break; break;
+ default: default:
+ retval = -EINVAL; retval = -EINVAL;
+ DWC_ERROR("DWC OTG HCD - ClearHubFeat DWC_ERROR("DWC OTG HCD - ClearHubFeat
+ _wValue); _wValue);
+ } }
+ break; break;
+ case ClearPortFeature: case ClearPortFeature:
+ if (!_wIndex || _wIndex > 1) if (!_wIndex || _wIndex > 1)
+ goto error; goto error;
+ switch (_wValue) { switch (_wValue) {
+ case USB_PORT_FEAT_ENABLE: case USB_PORT_FEAT_ENABLE:
+ DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB
+ "ClearPortFeature USB_PO "ClearPortFeature USB_PO
+ hprt0.d32 = dwc_otg_read_hprt0(core_i hprt0.d32 = dwc_otg_read_hprt0(core_i
+ hprt0.b.prtena = 1; hprt0.b.prtena = 1;
+ dwc_write_reg32(core_if->host_if->hpr dwc_write_reg32(core_if->host_if->hpr
+ break; break;
+ case USB_PORT_FEAT_SUSPEND: case USB_PORT_FEAT_SUSPEND:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "ClearPortFeature USB_PO "ClearPortFeature USB_PO
+ hprt0.d32 = dwc_otg_read_hprt0(core_i hprt0.d32 = dwc_otg_read_hprt0(core_i
+ hprt0.b.prtres = 1; hprt0.b.prtres = 1;
+ dwc_write_reg32(core_if->host_if->hpr dwc_write_reg32(core_if->host_if->hpr
+
+ /* Clear Resume bit */ /* Clear Resume bit */
+ mdelay(100); mdelay(100);
+ hprt0.b.prtres = 0; hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hpr dwc_write_reg32(core_if->host_if->hpr
+ break; break;
+ case USB_PORT_FEAT_POWER: case USB_PORT_FEAT_POWER:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "ClearPortFeature USB_PO "ClearPortFeature USB_PO
+ hprt0.d32 = dwc_otg_read_hprt0(core_i hprt0.d32 = dwc_otg_read_hprt0(core_i
+ hprt0.b.prtpwr = 0; hprt0.b.prtpwr = 0;
+ dwc_write_reg32(core_if->host_if->hpr dwc_write_reg32(core_if->host_if->hpr
+ break; break;
+ case USB_PORT_FEAT_INDICATOR: case USB_PORT_FEAT_INDICATOR:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "ClearPortFeature USB_PO "ClearPortFeature USB_PO
+
+ /* Port inidicator not supported */ /* Port inidicator not supported */
+ break; break;
+ case USB_PORT_FEAT_C_CONNECTION: case USB_PORT_FEAT_C_CONNECTION:
+ /* Clears drivers internal connect st /* Clears drivers internal connect st
+ * flag */ * flag */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "ClearPortFeature USB "ClearPortFeature USB
+ dwc_otg_hcd->flags.b.port_connect_sta dwc_otg_hcd->flags.b.port_connect_sta
+ break; break;
+ case USB_PORT_FEAT_C_RESET: case USB_PORT_FEAT_C_RESET:
+ /* Clears the driver's internal Port /* Clears the driver's internal Port
+ * flag */ * flag */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "ClearPortFeature USB "ClearPortFeature USB
+ dwc_otg_hcd->flags.b.port_reset_chang dwc_otg_hcd->flags.b.port_reset_chang
+ break; break;
+ case USB_PORT_FEAT_C_ENABLE: case USB_PORT_FEAT_C_ENABLE:
+ /* Clears the driver's internal Port /* Clears the driver's internal Port
+ * Enable/Disable Change flag */ * Enable/Disable Change flag */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "ClearPortFeature USB "ClearPortFeature USB
+ dwc_otg_hcd->flags.b.port_enable_chan dwc_otg_hcd->flags.b.port_enable_chan
+ break; break;
+ case USB_PORT_FEAT_C_SUSPEND: case USB_PORT_FEAT_C_SUSPEND:
+ /* Clears the driver's internal Port Susp /* Clears the driver's internal Port Susp
+ * Change flag, which is set when resume * Change flag, which is set when resume
+ * the host port is complete */ * the host port is complete */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CON DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CON
+ "ClearPortFeature USB "ClearPortFeature USB
+ dwc_otg_hcd->flags.b.port_suspend_cha dwc_otg_hcd->flags.b.port_suspend_cha
+ break; break;
+ case USB_PORT_FEAT_C_OVER_CURRENT: case USB_PORT_FEAT_C_OVER_CURRENT:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "ClearPortFeature USB_PO "ClearPortFeature USB_PO
+ dwc_otg_hcd->flags.b.port_over_curren dwc_otg_hcd->flags.b.port_over_curren
+ break; break;
+ default: default:
+ retval = -EINVAL; retval = -EINVAL;
+ DWC_ERROR("DWC OTG HCD - " DWC_ERROR("DWC OTG HCD - "
+ "ClearPortFeature request "ClearPortFeature request
+ "unknown or unsupported\n" "unknown or unsupported\n"
+ } }
+ break; break;
+ case GetHubDescriptor: case GetHubDescriptor:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL
+ "GetHubDescriptor\n"); "GetHubDescriptor\n");
+ desc = (struct usb_hub_descriptor *)_buf; desc = (struct usb_hub_descriptor *)_buf;
+ desc->bDescLength = 9; desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29; desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1; desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = 0x08; desc->wHubCharacteristics = 0x08;
+ desc->bPwrOn2PwrGood = 1; desc->bPwrOn2PwrGood = 1;
+ desc->bHubContrCurrent = 0; desc->bHubContrCurrent = 0;
+ desc->bitmap[0] = 0; desc->bitmap[0] = 0;
+ desc->bitmap[1] = 0xff; desc->bitmap[1] = 0xff;
+ break; break;
+ case GetHubStatus: case GetHubStatus:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL
+ "GetHubStatus\n"); "GetHubStatus\n");
+ memset(_buf, 0, 4); memset(_buf, 0, 4);
+ break; break;
+ case GetPortStatus: case GetPortStatus:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL
+ "GetPortStatus\n"); "GetPortStatus\n");
+ if (!_wIndex || _wIndex > 1) if (!_wIndex || _wIndex > 1)
+ goto error; goto error;
+ port_status = 0; port_status = 0;
+ if (dwc_otg_hcd->flags.b.port_connect_status_ if (dwc_otg_hcd->flags.b.port_connect_status_
+ port_status |= (1 << USB_PORT_FEAT_C_ port_status |= (1 << USB_PORT_FEAT_C_
+ if (dwc_otg_hcd->flags.b.port_enable_change) if (dwc_otg_hcd->flags.b.port_enable_change)
+ port_status |= (1 << USB_PORT_FEAT_C_ port_status |= (1 << USB_PORT_FEAT_C_
+ if (dwc_otg_hcd->flags.b.port_suspend_change) if (dwc_otg_hcd->flags.b.port_suspend_change)
+ port_status |= (1 << USB_PORT_FEAT_C_ port_status |= (1 << USB_PORT_FEAT_C_
+ if (dwc_otg_hcd->flags.b.port_reset_change) if (dwc_otg_hcd->flags.b.port_reset_change)
+ port_status |= (1 << USB_PORT_FEAT_C_ port_status |= (1 << USB_PORT_FEAT_C_
+ if (dwc_otg_hcd->flags.b.port_over_current_ch if (dwc_otg_hcd->flags.b.port_over_current_ch
+ DWC_ERROR("Device Not Supported\n"); DWC_ERROR("Device Not Supported\n");
+ port_status |= (1 << USB_PORT_FEAT_C_ port_status |= (1 << USB_PORT_FEAT_C_
+ } }
+ if (!dwc_otg_hcd->flags.b.port_connect_status if (!dwc_otg_hcd->flags.b.port_connect_status
+ /* /*
+ * The port is disconnected, which means * The port is disconnected, which means
+ * either in device mode or it soon will * either in device mode or it soon will
+ * return 0's for the remainder of the po * return 0's for the remainder of the po
+ * since the port register can't be read * since the port register can't be read
+ * is in device mode. * is in device mode.
+ */ */
+ *((__le32 *) _buf) = cpu_to_le32(port_sta *((__le32 *) _buf) = cpu_to_le32(port_sta
+ break; break;
+ } }
+ hprt0.d32 = dwc_read_reg32(core_if->host_if-> hprt0.d32 = dwc_read_reg32(core_if->host_if->
+ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hp DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hp
+ if (hprt0.b.prtconnsts) if (hprt0.b.prtconnsts)
+ port_status |= (1 << USB_PORT_FEAT_CO port_status |= (1 << USB_PORT_FEAT_CO
+ if (hprt0.b.prtena) if (hprt0.b.prtena)
+ port_status |= (1 << USB_PORT_FEAT_EN port_status |= (1 << USB_PORT_FEAT_EN
+ if (hprt0.b.prtsusp) if (hprt0.b.prtsusp)
+ port_status |= (1 << USB_PORT_FEAT_SU port_status |= (1 << USB_PORT_FEAT_SU
+ if (hprt0.b.prtovrcurract) if (hprt0.b.prtovrcurract)
+ port_status |= (1 << USB_PORT_FEAT_OV port_status |= (1 << USB_PORT_FEAT_OV
+ if (hprt0.b.prtrst) if (hprt0.b.prtrst)
+ port_status |= (1 << USB_PORT_FEAT_RE port_status |= (1 << USB_PORT_FEAT_RE
+ if (hprt0.b.prtpwr) if (hprt0.b.prtpwr)
+ port_status |= (1 << USB_PORT_FEAT_PO port_status |= (1 << USB_PORT_FEAT_PO
+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_S if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_S
+ port_status |= USB_PORT_STAT_HIGH_SPE | port_status |= (1 << USB_PORT_FEAT_HI
+
+ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_L else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_L
+ port_status |= (1 << USB_PORT_FEAT_LO port_status |= (1 << USB_PORT_FEAT_LO
+ if (hprt0.b.prttstctl) if (hprt0.b.prttstctl)
+ port_status |= (1 << USB_PORT_FEAT_TE port_status |= (1 << USB_PORT_FEAT_TE
+
+ /* USB_PORT_FEAT_INDICATOR unsupported always 0 * /* USB_PORT_FEAT_INDICATOR unsupported always 0 *
+ *((__le32 *) _buf) = cpu_to_le32(port_status); *((__le32 *) _buf) = cpu_to_le32(port_status);
+ break; break;
+ case SetHubFeature: case SetHubFeature:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL
+ "SetHubFeature\n"); "SetHubFeature\n");
+
+ /* No HUB features supported */ /* No HUB features supported */
+ break; break;
+ case SetPortFeature: case SetPortFeature:
+ if (_wValue != USB_PORT_FEAT_TEST && (!_wInde if (_wValue != USB_PORT_FEAT_TEST && (!_wInde
+ goto error; goto error;
+ if (!dwc_otg_hcd->flags.b.port_connect_status if (!dwc_otg_hcd->flags.b.port_connect_status
+ /* /*
+ * The port is disconnected, which means * The port is disconnected, which means
+ * either in device mode or it soon will * either in device mode or it soon will
+ * return without doing anything since th * return without doing anything since th
+ * register can't be written if the core * register can't be written if the core
+ * mode. * mode.
+ */ */
+ break; break;
+ } }
+ switch (_wValue) { switch (_wValue) {
+ case USB_PORT_FEAT_SUSPEND: case USB_PORT_FEAT_SUSPEND:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "SetPortFeature - USB_PO "SetPortFeature - USB_PO
+ if (_hcd->self.otg_port == _wIndex if (_hcd->self.otg_port == _wIndex
+ && _hcd->self.b_hnp_enable) { && _hcd->self.b_hnp_enable) {
+ gotgctl_data_t gotgctl = {.d3 gotgctl_data_t gotgctl = {.d3
+ gotgctl.b.hstsethnpen = 1; gotgctl.b.hstsethnpen = 1;
+ dwc_modify_reg32(&core_if->co dwc_modify_reg32(&core_if->co
+ gotgctl, 0, gotgctl, 0,
+ core_if->op_state = A_SUSPEND core_if->op_state = A_SUSPEND
+ } }
+ hprt0.d32 = dwc_otg_read_hprt0(core_i hprt0.d32 = dwc_otg_read_hprt0(core_i
+ hprt0.b.prtsusp = 1; hprt0.b.prtsusp = 1;
+ dwc_write_reg32(core_if->host_if->hpr dwc_write_reg32(core_if->host_if->hpr
+
+ //DWC_PRINT( "SUSPEND: HPRT0=%0x\n", hprt //DWC_PRINT( "SUSPEND: HPRT0=%0x\n", hprt
+ /* Suspend the Phy Clock */ /* Suspend the Phy Clock */
+ { {
+ pcgcctl_data_t pcgcctl = {.d3 pcgcctl_data_t pcgcctl = {.d3
+ pcgcctl.b.stoppclk = 1; pcgcctl.b.stoppclk = 1;
+ dwc_write_reg32(core_if->pcgc dwc_write_reg32(core_if->pcgc
+ } }
+
+ /* For HNP the bus must be suspended for /* For HNP the bus must be suspended for
+ if (_hcd->self.b_hnp_enable) { if (_hcd->self.b_hnp_enable) {
+ mdelay(200); mdelay(200);
+
+ //DWC_PRINT( "SUSPEND: wait compl //DWC_PRINT( "SUSPEND: wait compl
+ } }
+ break; break;
+ case USB_PORT_FEAT_POWER: case USB_PORT_FEAT_POWER:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "SetPortFeature - USB_PO "SetPortFeature - USB_PO
+ hprt0.d32 = dwc_otg_read_hprt0(core_i hprt0.d32 = dwc_otg_read_hprt0(core_i
+ hprt0.b.prtpwr = 1; hprt0.b.prtpwr = 1;
+ dwc_write_reg32(core_if->host_if->hpr dwc_write_reg32(core_if->host_if->hpr
+ break; break;
+ case USB_PORT_FEAT_RESET: case USB_PORT_FEAT_RESET:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "SetPortFeature - USB_PO "SetPortFeature - USB_PO
+ hprt0.d32 = dwc_otg_read_hprt0(core_i hprt0.d32 = dwc_otg_read_hprt0(core_i
+
+ /* When B-Host the Port reset bit is set /* When B-Host the Port reset bit is set
+ * the Start HCD Callback function, so th * the Start HCD Callback function, so th
+ * the reset is started within 1ms of the * the reset is started within 1ms of the
+ * success interrupt. */ * success interrupt. */
+ if (!_hcd->self.is_b_host) { if (!_hcd->self.is_b_host) {
+ hprt0.b.prtrst = 1; hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host dwc_write_reg32(core_if->host
+ } }
+
+ /* Clear reset bit in 10ms (FS/LS) or 50m /* Clear reset bit in 10ms (FS/LS) or 50m
+ MDELAY(60); | MDELAY(60);
+ hprt0.b.prtrst = 0; hprt0.b.prtrst = 0;
+ dwc_write_reg32(core_if->host_if->hpr dwc_write_reg32(core_if->host_if->hpr
+ break; break;
+
+#ifdef DWC_HS_ELECT_TST #ifdef DWC_HS_ELECT_TST
+ case USB_PORT_FEAT_TEST: case USB_PORT_FEAT_TEST:
+ { {
+ uint32_t t; uint32_t t;
+ gintmsk_data_t gintmsk; gintmsk_data_t gintmsk;
+ t = (_wIndex >> 8); /* MS t = (_wIndex >> 8); /* MS
+ DWC_DEBUGPL(DBG_HCD, DWC_DEBUGPL(DBG_HCD,
+ "DWC OTG HCD HUB "DWC OTG HCD HUB
+ "SetPortFeature "SetPortFeature
+ t); t);
+ warn("USB_PORT_FEAT_TEST %d\n warn("USB_PORT_FEAT_TEST %d\n
+ if (t < 6) { if (t < 6) {
+ hprt0.d32 = dwc_otg_r hprt0.d32 = dwc_otg_r
+ hprt0.b.prttstctl = t hprt0.b.prttstctl = t
+ dwc_write_reg32(core_ dwc_write_reg32(core_
+ } else { } else {
+ /* Setup global vars with /* Setup global vars with
+ * dirty hack, should be * dirty hack, should be
+ */ */
+ global_regs = core_if->co global_regs = core_if->co
+ hc_global_regs = core hc_global_regs = core
+ hc_regs = (dwc_otg_hc hc_regs = (dwc_otg_hc
+ data_fifo = (uint32_t data_fifo = (uint32_t
+ if (t == 6) { /* HS if (t == 6) { /* HS
+ /* Save curre /* Save curre
+ gintmsk.d32 = gintmsk.d32 =
+
+ /* Disable al /* Disable al
+ * the hardwa * the hardwa
+ */ */
+ dwc_write_reg dwc_write_reg
+
+ /* 15 second /* 15 second
+ mdelay(15000) mdelay(15000)
+
+ /* Drive susp /* Drive susp
+ hprt0.d32 = d hprt0.d32 = d
+ hprt0.b.prtsu hprt0.b.prtsu
+ hprt0.b.prtre hprt0.b.prtre
+ dwc_write_reg dwc_write_reg
+
+ /* 15 second /* 15 second
+ mdelay(15000) mdelay(15000)
+
+ /* Drive resu /* Drive resu
+ hprt0.d32 = d hprt0.d32 = d
+ hprt0.b.prtsu hprt0.b.prtsu
+ hprt0.b.prtre hprt0.b.prtre
+ dwc_write_reg dwc_write_reg
+ mdelay(100); mdelay(100);
+
+ /* Clear the /* Clear the
+ hprt0.b.prtre hprt0.b.prtre
+ dwc_write_reg dwc_write_reg
+
+ /* Restore interr /* Restore interr
+ dwc_write_reg32(& dwc_write_reg32(&
+ } else if (t == 7) { } else if (t == 7) {
+ /* Save curre /* Save curre
+ gintmsk.d32 = gintmsk.d32 =
+
+ /* Disable al /* Disable al
+ * the hardwa * the hardwa
+ */ */
+ dwc_write_reg dwc_write_reg
+
+ /* 15 second /* 15 second
+ mdelay(15000) mdelay(15000)
+
+ /* Send the S /* Send the S
+ do_setup(); do_setup();
+
+ /* 15 second /* 15 second
+ mdelay(15000) mdelay(15000)
+
+ /* Restore in /* Restore in
+ dwc_write_reg dwc_write_reg
+ } else if (t == 8) { } else if (t == 8) {
+ /* Save curre /* Save curre
+ gintmsk.d32 = gintmsk.d32 =
+
+ /* Disable al /* Disable al
+ * the hardwa * the hardwa
+ */ */
+ dwc_write_reg dwc_write_reg
+
+ /* Send the S /* Send the S
+ do_setup(); do_setup();
+
+ /* 15 second /* 15 second
+ mdelay(15000) mdelay(15000)
+
+ /* Send the I /* Send the I
+ do_in_ack(); do_in_ack();
+
+ /* 15 second /* 15 second
+ mdelay(15000) mdelay(15000)
+
+ /* Restore in /* Restore in
+ dwc_write_reg dwc_write_reg
+ } }
+ } }
+ break; break;
+ } }
+
+#endif /* DWC_HS_ELECT_TST */ #endif /* DWC_HS_ELECT_TST */
+ case USB_PORT_FEAT_INDICATOR: case USB_PORT_FEAT_INDICATOR:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB
+ "SetPortFeature - USB_PO "SetPortFeature - USB_PO
+ /* Not supported */ /* Not supported */
+ break; break;
+ default: default:
+ retval = -EINVAL; retval = -EINVAL;
+ DWC_ERROR("DWC OTG HCD - " DWC_ERROR("DWC OTG HCD - "
+ "SetPortFeature request %x "SetPortFeature request %x
+ "unknown or unsupported\n" "unknown or unsupported\n"
+ break; break;
+ } }
+ break; break;
+ default: default:
+ error:retval = -EINVAL; error:retval = -EINVAL;
+ DWC_WARN("DWC OTG HCD - " DWC_WARN("DWC OTG HCD - "
+ "Unknown hub control request type o "Unknown hub control request type o
+ _typeReq, _wIndex, _wValue); _typeReq, _wIndex, _wValue);
+ break; break;
+ } }
+ return retval; return retval;
+} }
+
+
+/** /**
+ * Assigns transactions from a QTD to a free host channel and * Assigns transactions from a QTD to a free host channel and
+ * host channel to perform the transactions. The host channel * host channel to perform the transactions. The host channel
+ * the free list. * the free list.
+ * *
+ * @param _hcd The HCD state structure. * @param _hcd The HCD state structure.
+ * @param _qh Transactions from the first QTD for this QH are * @param _qh Transactions from the first QTD for this QH are
+ * assigned to a free host channel. * assigned to a free host channel.
+ */ */
+static void assign_and_init_hc(dwc_otg_hcd_t * _hcd, dwc_otg_ static void assign_and_init_hc(dwc_otg_hcd_t * _hcd, dwc_otg_
+{ {
+ dwc_hc_t * hc; dwc_hc_t * hc;
+ dwc_otg_qtd_t * qtd; dwc_otg_qtd_t * qtd;
+ struct urb *urb; struct urb *urb;
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, _hcd, DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, _hcd,
+ hc = list_entry(_hcd->free_hc_list.next, dwc_hc_t, hc hc = list_entry(_hcd->free_hc_list.next, dwc_hc_t, hc
+
+ /* Remove the host channel from the free list. */ /* Remove the host channel from the free list. */
+ list_del_init(&hc->hc_list_entry); list_del_init(&hc->hc_list_entry);
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, q qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, q
+ urb = qtd->urb; urb = qtd->urb;
+ _qh->channel = hc; _qh->channel = hc;
+ _qh->qtd_in_process = qtd; _qh->qtd_in_process = qtd;
+
+ /* /*
+ * Use usb_pipedevice to determine device address. Th * Use usb_pipedevice to determine device address. Th
+ * 0 before the SET_ADDRESS command and the correct a * 0 before the SET_ADDRESS command and the correct a
+ */ */
+ hc->dev_addr = usb_pipedevice(urb->pipe); hc->dev_addr = usb_pipedevice(urb->pipe);
+ hc->ep_num = usb_pipeendpoint(urb->pipe); hc->ep_num = usb_pipeendpoint(urb->pipe);
+ if (urb->dev->speed == USB_SPEED_LOW) { if (urb->dev->speed == USB_SPEED_LOW) {
+ hc->speed = DWC_OTG_EP_SPEED_LOW; hc->speed = DWC_OTG_EP_SPEED_LOW;
+ } else if (urb->dev->speed == USB_SPEED_FULL) { } else if (urb->dev->speed == USB_SPEED_FULL) {
+ hc->speed = DWC_OTG_EP_SPEED_FULL; hc->speed = DWC_OTG_EP_SPEED_FULL;
+ } else { } else {
+ hc->speed = DWC_OTG_EP_SPEED_HIGH; hc->speed = DWC_OTG_EP_SPEED_HIGH;
+ } }
+ hc->max_packet = dwc_max_packet(_qh->maxp); hc->max_packet = dwc_max_packet(_qh->maxp);
+ hc->xfer_started = 0; hc->xfer_started = 0;
+ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS; hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS;
+ hc->error_state = (qtd->error_count > 0); hc->error_state = (qtd->error_count > 0);
+ hc->halt_on_queue = 0; hc->halt_on_queue = 0;
+ hc->halt_pending = 0; hc->halt_pending = 0;
+ hc->requests = 0; hc->requests = 0;
+
+ /* /*
+ * The following values may be modified in the transf * The following values may be modified in the transf
+ * below. The xfer_len value may be reduced when the * below. The xfer_len value may be reduced when the
+ * started to accommodate the max widths of the XferS * started to accommodate the max widths of the XferS
+ * fields in the HCTSIZn register. * fields in the HCTSIZn register.
+ */ */
+ hc->do_ping = _qh->ping_state; hc->do_ping = _qh->ping_state;
+ hc->ep_is_in = (usb_pipein(urb->pipe) != 0); hc->ep_is_in = (usb_pipein(urb->pipe) != 0);
+ hc->data_pid_start = _qh->data_toggle; hc->data_pid_start = _qh->data_toggle;
+ hc->multi_count = 1; hc->multi_count = 1;
+ if (_hcd->core_if->dma_enable) { if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = hc->xfer_buff =
+ (uint8_t *)(u32)urb->transfer_dma + u (uint8_t *)(u32)urb->transfer_dma + u
+ } else { } else {
+ hc->xfer_buff = hc->xfer_buff =
+ (uint8_t *) urb->transfer_buffer + urb->a (uint8_t *) urb->transfer_buffer + urb->a
+ } }
+ hc->xfer_len = urb->transfer_buffer_length - urb->act hc->xfer_len = urb->transfer_buffer_length - urb->act
+ hc->xfer_count = 0; hc->xfer_count = 0;
+
+ /* /*
+ * Set the split attributes * Set the split attributes
+ */ */
+ hc->do_split = 0; hc->do_split = 0;
+ if (_qh->do_split) { if (_qh->do_split) {
+ hc->do_split = 1; hc->do_split = 1;
+ hc->xact_pos = qtd->isoc_split_pos; hc->xact_pos = qtd->isoc_split_pos;
+ hc->complete_split = qtd->complete_split; hc->complete_split = qtd->complete_split;
+ hc->hub_addr = urb->dev->tt->hub->devnum; hc->hub_addr = urb->dev->tt->hub->devnum;
+ hc->port_addr = urb->dev->ttport; hc->port_addr = urb->dev->ttport;
+ } }
+ switch (usb_pipetype(urb->pipe)) { switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL: case PIPE_CONTROL:
+ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL; hc->ep_type = DWC_OTG_EP_TYPE_CONTROL;
+ switch (qtd->control_phase) { switch (qtd->control_phase) {
+ case DWC_OTG_CONTROL_SETUP: case DWC_OTG_CONTROL_SETUP:
+ DWC_DEBUGPL(DBG_HCDV, " Control setu DWC_DEBUGPL(DBG_HCDV, " Control setu
+ hc->do_ping = 0; hc->do_ping = 0;
+ hc->ep_is_in = 0; hc->ep_is_in = 0;
+ hc->data_pid_start = DWC_OTG_HC_PID_S hc->data_pid_start = DWC_OTG_HC_PID_S
+ if (_hcd->core_if->dma_enable) { if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u hc->xfer_buff = (uint8_t *)(u
+ } else { } else {
+ hc->xfer_buff = (uint8_t *) u hc->xfer_buff = (uint8_t *) u
+ } }
+ hc->xfer_len = 8; hc->xfer_len = 8;
+ break; break;
+ case DWC_OTG_CONTROL_DATA: case DWC_OTG_CONTROL_DATA:
+ DWC_DEBUGPL(DBG_HCDV, " Control data DWC_DEBUGPL(DBG_HCDV, " Control data
+ hc->data_pid_start = qtd->data_toggle hc->data_pid_start = qtd->data_toggle
+ break; break;
+ case DWC_OTG_CONTROL_STATUS: case DWC_OTG_CONTROL_STATUS:
+
+ /* /*
+ * Direction is opposite of data dire * Direction is opposite of data dire
+ * data. * data.
+ */ */
+ DWC_DEBUGPL(DBG_HCDV, DWC_DEBUGPL(DBG_HCDV,
+ " Control status tra " Control status tra
+ if (urb->transfer_buffer_length == 0) if (urb->transfer_buffer_length == 0)
+ hc->ep_is_in = 1; hc->ep_is_in = 1;
+ } else { } else {
+ hc->ep_is_in = (usb_pipein(ur hc->ep_is_in = (usb_pipein(ur
+ } }
+ if (hc->ep_is_in) { if (hc->ep_is_in) {
+ hc->do_ping = 0; hc->do_ping = 0;
+ } }
+ hc->data_pid_start = DWC_OTG_HC_PID_D hc->data_pid_start = DWC_OTG_HC_PID_D
+ hc->xfer_len = 0; hc->xfer_len = 0;
+ if (_hcd->core_if->dma_enable) { if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u hc->xfer_buff = (uint8_t *)(u
+ } else { } else {
+ hc->xfer_buff = (uint8_t *) _ hc->xfer_buff = (uint8_t *) _
+ } }
+ break; break;
+ } }
+ break; break;
+ case PIPE_BULK: case PIPE_BULK:
+ hc->ep_type = DWC_OTG_EP_TYPE_BULK; hc->ep_type = DWC_OTG_EP_TYPE_BULK;
+ break; break;
+ case PIPE_INTERRUPT: case PIPE_INTERRUPT:
+ hc->ep_type = DWC_OTG_EP_TYPE_INTR; hc->ep_type = DWC_OTG_EP_TYPE_INTR;
+ break; break;
+ case PIPE_ISOCHRONOUS: case PIPE_ISOCHRONOUS:
+ { {
+ struct usb_iso_packet_descriptor *fra struct usb_iso_packet_descriptor *fra
+ frame_desc = &urb->iso_frame_desc[qtd frame_desc = &urb->iso_frame_desc[qtd
+ hc->ep_type = DWC_OTG_EP_TYPE_ISOC; hc->ep_type = DWC_OTG_EP_TYPE_ISOC;
+ if (_hcd->core_if->dma_enable) { if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u hc->xfer_buff = (uint8_t *)(u
+ } else { } else {
+ hc->xfer_buff = (uint8_t *) u hc->xfer_buff = (uint8_t *) u
+ } }
+ hc->xfer_buff += frame_desc->offset + hc->xfer_buff += frame_desc->offset +
+ hc->xfer_len = frame_desc->length - q hc->xfer_len = frame_desc->length - q
+ if (hc->xact_pos == DWC_HCSPLIT_XACTP if (hc->xact_pos == DWC_HCSPLIT_XACTP
+ if (hc->xfer_len <= 188) { if (hc->xfer_len <= 188) {
+ hc->xact_pos = DWC_HC hc->xact_pos = DWC_HC
+ } else { } else {
+ hc->xact_pos = DWC_HC hc->xact_pos = DWC_HC
+ } }
+ } }
+ } }
+ break; break;
+ } }
+
+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR if (hc->ep_type == DWC_OTG_EP_TYPE_INTR
+ || hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { || hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /* /*
+ * This value may be modified when the transf * This value may be modified when the transf
+ * reflect the actual transfer length. * reflect the actual transfer length.
+ */ */
+ hc->multi_count = dwc_hb_mult(_qh->maxp); hc->multi_count = dwc_hb_mult(_qh->maxp);
+ } }
+ dwc_otg_hc_init(_hcd->core_if, hc); dwc_otg_hc_init(_hcd->core_if, hc);
+ hc->qh = _qh; hc->qh = _qh;
+} }
+
+
+/** /**
+ * This function selects transactions from the HCD transfer s * This function selects transactions from the HCD transfer s
+ * assigns them to available host channels. It is called from * assigns them to available host channels. It is called from
+ * handler functions. * handler functions.
+ * *
+ * @param _hcd The HCD state structure. * @param _hcd The HCD state structure.
+ * *
+ * @return The types of new transactions that were assigned t * @return The types of new transactions that were assigned t
+ */ */
+dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dw dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dw
+{ {
+ struct list_head *qh_ptr; struct list_head *qh_ptr;
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+ int num_channels; int num_channels;
+ unsigned long flags; <
+ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACT dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACT
+
+#ifdef DEBUG_SOF #ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n"); DWC_DEBUGPL(DBG_HCD, " Select Transactions\n");
+#endif /* */ #endif /* */
+
+ /* Process entries in the periodic ready list. */ /* Process entries in the periodic ready list. */
+ num_channels = _hcd->core_if->core_params->host_chann <
+ qh_ptr = _hcd->periodic_sched_ready.next; qh_ptr = _hcd->periodic_sched_ready.next;
+ while (qh_ptr != &_hcd->periodic_sched_ready while (qh_ptr != &_hcd->periodic_sched_ready
+ && !list_empty(&_hcd->free_hc_list)) { && !list_empty(&_hcd->free_hc_list)) {
+ <
+ // Make sure we leave one channel for non per <
+ local_irq_save(flags); <
+ if (_hcd->available_host_channels <= 1) { <
+ local_irq_restore(flags); <
+ break; <
+ } <
+ _hcd->available_host_channels--; <
+ local_irq_restore(flags); <
+ <
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list
+ assign_and_init_hc(_hcd, qh); assign_and_init_hc(_hcd, qh);
+ /* /*
+ * Move the QH from the periodic ready schedu * Move the QH from the periodic ready schedu
+ * periodic assigned schedule. * periodic assigned schedule.
+ */ */
+ qh_ptr = qh_ptr->next; qh_ptr = qh_ptr->next;
+ local_irq_save(flags); <
+ list_move(&qh->qh_list_entry, &_hcd->periodic list_move(&qh->qh_list_entry, &_hcd->periodic
+ local_irq_restore(flags); <
+ ret_val = DWC_OTG_TRANSACTION_PERIODIC; ret_val = DWC_OTG_TRANSACTION_PERIODIC;
+ } }
+ /* /*
+ * Process entries in the deferred portion of the non * Process entries in the deferred portion of the non
+ * A NAK put them here and, at the right time, they n * A NAK put them here and, at the right time, they n
+ * placed on the sched_inactive list. * placed on the sched_inactive list.
+ */ */
+ qh_ptr = _hcd->non_periodic_sched_deferred.next; qh_ptr = _hcd->non_periodic_sched_deferred.next;
+ while (qh_ptr != &_hcd->non_periodic_sched_deferred) while (qh_ptr != &_hcd->non_periodic_sched_deferred)
+ uint16_t frame_number = uint16_t frame_number =
+ dwc_otg_hcd_get_frame_number(dwc_otg_ dwc_otg_hcd_get_frame_number(dwc_otg_
+ > unsigned long flags;
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list
+ qh_ptr = qh_ptr->next; qh_ptr = qh_ptr->next;
+
+ if (dwc_frame_num_le(qh->sched_frame, frame_n if (dwc_frame_num_le(qh->sched_frame, frame_n
+ // NAK did this // NAK did this
+ /* /*
+ * Move the QH from the non periodic * Move the QH from the non periodic
+ * the non periodic inactive schedule * the non periodic inactive schedule
+ */ */
+ local_irq_save(flags); local_irq_save(flags);
+ list_move(&qh->qh_list_entry, list_move(&qh->qh_list_entry,
+ &_hcd->non_periodic_sched_i &_hcd->non_periodic_sched_i
+ local_irq_restore(flags); local_irq_restore(flags);
+ } }
+ } }
+
+ /* /*
+ * Process entries in the inactive portion of the non * Process entries in the inactive portion of the non
+ * schedule. Some free host channels may not be used * schedule. Some free host channels may not be used
+ * reserved for periodic transfers. * reserved for periodic transfers.
+ */ */
+ qh_ptr = _hcd->non_periodic_sched_inactive.next; qh_ptr = _hcd->non_periodic_sched_inactive.next;
+ num_channels = _hcd->core_if->core_params->host_chann num_channels = _hcd->core_if->core_params->host_chann
+ while (qh_ptr != &_hcd->non_periodic_sched_inactive | while (qh_ptr != &_hcd->non_periodic_sched_inactive &
+ > (_hcd->non_periodic_channels <
+ > num_channels - _hcd->periodic_channels)
+ && !list_empty(&_hcd->free_hc_list)) { && !list_empty(&_hcd->free_hc_list)) {
+ <
+ local_irq_save(flags); <
+ if (_hcd->available_host_channels < 1) { <
+ local_irq_restore(flags); <
+ break; <
+ } <
+ _hcd->available_host_channels--; <
+ local_irq_restore(flags); <
+ <
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list
+ assign_and_init_hc(_hcd, qh); assign_and_init_hc(_hcd, qh);
+
+ /* /*
+ * Move the QH from the non-periodic inactive * Move the QH from the non-periodic inactive
+ * non-periodic active schedule. * non-periodic active schedule.
+ */ */
+ qh_ptr = qh_ptr->next; qh_ptr = qh_ptr->next;
+ local_irq_save(flags); <
+ list_move(&qh->qh_list_entry, list_move(&qh->qh_list_entry,
+ &_hcd->non_periodic_sched_active); &_hcd->non_periodic_sched_active);
+ local_irq_restore(flags); <
+ if (ret_val == DWC_OTG_TRANSACTION_NONE) { if (ret_val == DWC_OTG_TRANSACTION_NONE) {
+ ret_val = DWC_OTG_TRANSACTION_NON_PER ret_val = DWC_OTG_TRANSACTION_NON_PER
+ } else { } else {
+ ret_val = DWC_OTG_TRANSACTION_ALL; ret_val = DWC_OTG_TRANSACTION_ALL;
+ } }
+ > _hcd->non_periodic_channels++;
+ } }
+ return ret_val; return ret_val;
+} }
+
+/** /**
+ * Attempts to queue a single transaction request for a host * Attempts to queue a single transaction request for a host
+ * associated with either a periodic or non-periodic transfer * associated with either a periodic or non-periodic transfer
+ * assumes that there is space available in the appropriate r * assumes that there is space available in the appropriate r
+ * an OUT transfer or SETUP transaction in Slave mode, it che * an OUT transfer or SETUP transaction in Slave mode, it che
+ * is available in the appropriate Tx FIFO. * is available in the appropriate Tx FIFO.
+ * *
+ * @param _hcd The HCD state structure. * @param _hcd The HCD state structure.
+ * @param _hc Host channel descriptor associated with either * @param _hc Host channel descriptor associated with either
+ * non-periodic transfer. * non-periodic transfer.
+ * @param _fifo_dwords_avail Number of DWORDs available in th * @param _fifo_dwords_avail Number of DWORDs available in th
+ * FIFO for periodic transfers or the non-periodic Tx FIFO fo * FIFO for periodic transfers or the non-periodic Tx FIFO fo
+ * transfers. * transfers.
+ * *
+ * @return 1 if a request is queued and more requests may be * @return 1 if a request is queued and more requests may be
+ * complete the transfer, 0 if no more requests are required * complete the transfer, 0 if no more requests are required
+ * transfer, -1 if there is insufficient space in the Tx FIFO * transfer, -1 if there is insufficient space in the Tx FIFO
+ */ */
+static int queue_transaction(dwc_otg_hcd_t * _hcd, static int queue_transaction(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, uint16_t _fifo_ dwc_hc_t * _hc, uint16_t _fifo_
+{ {
+ int retval; int retval;
+ if (_hcd->core_if->dma_enable) { if (_hcd->core_if->dma_enable) {
+ if (!_hc->xfer_started) { if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_ dwc_otg_hc_start_transfer(_hcd->core_
+ _hc->qh->ping_state = 0; _hc->qh->ping_state = 0;
+ } }
+ retval = 0; retval = 0;
+ } else if (_hc->halt_pending) { } else if (_hc->halt_pending) {
+ /* Don't queue a request if the channel has b /* Don't queue a request if the channel has b
+ retval = 0; retval = 0;
+ } else if (_hc->halt_on_queue) { } else if (_hc->halt_on_queue) {
+ dwc_otg_hc_halt(_hcd->core_if, _hc, _hc->halt dwc_otg_hc_halt(_hcd->core_if, _hc, _hc->halt
+ retval = 0; retval = 0;
+ } else if (_hc->do_ping) { } else if (_hc->do_ping) {
+ if (!_hc->xfer_started) { if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_ dwc_otg_hc_start_transfer(_hcd->core_
+ } }
+ retval = 0; retval = 0;
+ } else if (!_hc->ep_is_in || _hc->data_pid_start == D } else if (!_hc->ep_is_in || _hc->data_pid_start == D
+ if ((_fifo_dwords_avail * 4) >= _hc->max_pack if ((_fifo_dwords_avail * 4) >= _hc->max_pack
+ if (!_hc->xfer_started) { if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hc dwc_otg_hc_start_transfer(_hc
+ retval = 1; retval = 1;
+ } else { } else {
+ retval = dwc_otg_hc_continue_ retval = dwc_otg_hc_continue_
+ } }
+ } else { } else {
+ retval = -1; retval = -1;
+ } }
+ } else { } else {
+ if (!_hc->xfer_started) { if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_ dwc_otg_hc_start_transfer(_hcd->core_
+ retval = 1; retval = 1;
+ } else { } else {
+ retval = dwc_otg_hc_continue_transfer retval = dwc_otg_hc_continue_transfer
+ } }
+ } }
+ return retval; return retval;
+} }
+
+
+/** /**
+ * Processes active non-periodic channels and queues transact * Processes active non-periodic channels and queues transact
+ * channels to the DWC_otg controller. After queueing transac * channels to the DWC_otg controller. After queueing transac
+ * FIFO Empty interrupt is enabled if there are more transact * FIFO Empty interrupt is enabled if there are more transact
+ * NP Tx FIFO or request queue space becomes available. Other * NP Tx FIFO or request queue space becomes available. Other
+ * FIFO Empty interrupt is disabled. * FIFO Empty interrupt is disabled.
+ */ */
+static void process_non_periodic_channels(dwc_otg_hcd_t * _hc static void process_non_periodic_channels(dwc_otg_hcd_t * _hc
+{ {
+ gnptxsts_data_t tx_status; gnptxsts_data_t tx_status;
+ struct list_head *orig_qh_ptr; struct list_head *orig_qh_ptr;
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+ int status; int status;
+ int no_queue_space = 0; int no_queue_space = 0;
+ int no_fifo_space = 0; int no_fifo_space = 0;
+ int more_to_do = 0; int more_to_do = 0;
+ dwc_otg_core_global_regs_t * global_regs = dwc_otg_core_global_regs_t * global_regs =
+ _hcd->core_if->core_global_regs; _hcd->core_if->core_global_regs;
+ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transaction DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transaction
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail
+ tx_status.b.nptxqspcavail); tx_status.b.nptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (befo DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (befo
+ tx_status.b.nptxfspcavail); tx_status.b.nptxfspcavail);
+#endif /* */ #endif /* */
+ /* /*
+ * Keep track of the starting point. Skip over the st * Keep track of the starting point. Skip over the st
+ * entry. * entry.
+ */ */
+ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodi _hcd->non_periodic_qh_ptr = _hcd->non_periodi
+ } }
+ orig_qh_ptr = _hcd->non_periodic_qh_ptr; orig_qh_ptr = _hcd->non_periodic_qh_ptr;
+
+ /* /*
+ * Process once through the active list or until no m * Process once through the active list or until no m
+ * available in the request queue or the Tx FIFO. * available in the request queue or the Tx FIFO.
+ */ */
+ do { do {
+
+ tx_status.d32 = dwc_read_reg32(&global_regs-> tx_status.d32 = dwc_read_reg32(&global_regs->
+ if (!_hcd->core_if->dma_enable if (!_hcd->core_if->dma_enable
+ && tx_status.b.nptxqspcavail == 0) { && tx_status.b.nptxqspcavail == 0) {
+ no_queue_space = 1; no_queue_space = 1;
+ break; break;
+ } }
+ qh = qh =
+ list_entry(_hcd->non_periodic_qh_ptr, dwc list_entry(_hcd->non_periodic_qh_ptr, dwc
+ qh_list_entry); qh_list_entry);
+ status = status =
+ queue_transaction(_hcd, qh->channel, queue_transaction(_hcd, qh->channel,
+ tx_status.b.nptxfspcava tx_status.b.nptxfspcava
+
+ if (status > 0) { if (status > 0) {
+ more_to_do = 1; more_to_do = 1;
+ } else if (status < 0) { } else if (status < 0) {
+ no_fifo_space = 1; no_fifo_space = 1;
+ break; break;
+ } }
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET | #ifdef OTG_PLB_DMA_TASKLET
+ if (atomic_read(&release_later)) { if (atomic_read(&release_later)) {
+ break; break;
+ } }
+#endif #endif
+
+ /* Advance to next QH, skipping start-of-list /* Advance to next QH, skipping start-of-list
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodi _hcd->non_periodic_qh_ptr = _hcd->non_periodi
+ if (_hcd->non_periodic_qh_ptr == &_hcd->non_p if (_hcd->non_periodic_qh_ptr == &_hcd->non_p
+ _hcd->non_periodic_qh_ptr = _hcd->non _hcd->non_periodic_qh_ptr = _hcd->non
+ } }
+ } while (_hcd->non_periodic_qh_ptr != orig_qh_ptr); } while (_hcd->non_periodic_qh_ptr != orig_qh_ptr);
+ if (!_hcd->core_if->dma_enable) { if (!_hcd->core_if->dma_enable) {
+ gintmsk_data_t intr_mask = {.d32 = 0}; gintmsk_data_t intr_mask = {.d32 = 0};
+ intr_mask.b.nptxfempty = 1; intr_mask.b.nptxfempty = 1;
+
+#ifndef CONFIG_OTG_PLB_DMA_TASKLET | #ifndef OTG_PLB_DMA_TASKLET
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnpt tx_status.d32 = dwc_read_reg32(&global_regs->gnpt
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Spac DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Spac
+ tx_status.b.nptxqspcavail); tx_status.b.nptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Ava DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Ava
+ tx_status.b.nptxfspcavail); tx_status.b.nptxfspcavail);
+#endif /* */ #endif /* */
+#endif #endif
+
+ if (more_to_do || no_queue_space || no_fifo_space if (more_to_do || no_queue_space || no_fifo_space
+
+ /* /*
+ * May need to queue more transaction * May need to queue more transaction
+ * queue or Tx FIFO empties. Enable t * queue or Tx FIFO empties. Enable t
+ * Tx FIFO empty interrupt. (Always u * Tx FIFO empty interrupt. (Always u
+ * level to ensure that new requests * level to ensure that new requests
+ * soon as possible.) * soon as possible.)
+ */ */
+ dwc_modify_reg32(&global_regs->gintms dwc_modify_reg32(&global_regs->gintms
+ } else { } else {
+ /* /*
+ * Disable the Tx FIFO empty interrup * Disable the Tx FIFO empty interrup
+ * no more transactions that need to * no more transactions that need to
+ * now. This function is called from * now. This function is called from
+ * handlers to queue more transaction * handlers to queue more transaction
+ * states change. * states change.
+ */ */
+ dwc_modify_reg32(&global_regs->gintms dwc_modify_reg32(&global_regs->gintms
+ } }
+ } }
+} }
+
+/** /**
+ * Processes periodic channels for the next frame and queues * Processes periodic channels for the next frame and queues
+ * these channels to the DWC_otg controller. After queueing t * these channels to the DWC_otg controller. After queueing t
+ * Periodic Tx FIFO Empty interrupt is enabled if there are m * Periodic Tx FIFO Empty interrupt is enabled if there are m
+ * to queue as Periodic Tx FIFO or request queue space become * to queue as Periodic Tx FIFO or request queue space become
+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disable * Otherwise, the Periodic Tx FIFO Empty interrupt is disable
+ */ */
+static void process_periodic_channels(dwc_otg_hcd_t * _hcd) static void process_periodic_channels(dwc_otg_hcd_t * _hcd)
+{ {
+ hptxsts_data_t tx_status; hptxsts_data_t tx_status;
+ struct list_head *qh_ptr; struct list_head *qh_ptr;
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+ int status; int status;
+ int no_queue_space = 0; int no_queue_space = 0;
+ int no_fifo_space = 0; int no_fifo_space = 0;
+ dwc_otg_host_global_regs_t * host_regs; dwc_otg_host_global_regs_t * host_regs;
+ host_regs = _hcd->core_if->host_if->host_global_regs; host_regs = _hcd->core_if->host_if->host_global_regs;
+ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n" DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n"
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail ( DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (
+ tx_status.b.ptxqspcavail); tx_status.b.ptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (befor DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (befor
+ tx_status.b.ptxfspcavail); tx_status.b.ptxfspcavail);
+
+#endif /* */ #endif /* */
+ qh_ptr = _hcd->periodic_sched_assigned.next; qh_ptr = _hcd->periodic_sched_assigned.next;
+ while (qh_ptr != &_hcd->periodic_sched_assigned) { while (qh_ptr != &_hcd->periodic_sched_assigned) {
+ tx_status.d32 = dwc_read_reg32(&host_regs->hp tx_status.d32 = dwc_read_reg32(&host_regs->hp
+ if (tx_status.b.ptxqspcavail == 0) { if (tx_status.b.ptxqspcavail == 0) {
+ no_queue_space = 1; no_queue_space = 1;
+ break; break;
+ } }
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list
+
+ /* /*
+ * Set a flag if we're queuing high-bandwidth * Set a flag if we're queuing high-bandwidth
+ * The flag prevents any halts to get into th * The flag prevents any halts to get into th
+ * the middle of multiple high-bandwidth pack * the middle of multiple high-bandwidth pack
+ */ */
+ if ((!_hcd->core_if->dma_enable) && if ((!_hcd->core_if->dma_enable) &&
+ (qh->channel->multi_count > 1)) { (qh->channel->multi_count > 1)) {
+ _hcd->core_if->queuing_high_bandwidth _hcd->core_if->queuing_high_bandwidth
+ } }
+ status = queue_transaction(_hcd, qh->channel, status = queue_transaction(_hcd, qh->channel,
+ if (status < 0) { if (status < 0) {
+ no_fifo_space = 1; no_fifo_space = 1;
+ break; break;
+ } }
+
+ /* /*
+ * In Slave mode, stay on the current transfe * In Slave mode, stay on the current transfe
+ * nothing more to do or the high-bandwidth r * nothing more to do or the high-bandwidth r
+ * reached. In DMA mode, only need to queue o * reached. In DMA mode, only need to queue o
+ * controller automatically handles multiple * controller automatically handles multiple
+ * high-bandwidth transfers. * high-bandwidth transfers.
+ */ */
+ if (_hcd->core_if->dma_enable || if (_hcd->core_if->dma_enable ||
+ (status == 0 || qh->channel->requests (status == 0 || qh->channel->requests
+ qh_ptr = qh_ptr->next; qh_ptr = qh_ptr->next;
+
+ /* /*
+ * Move the QH from the periodic assi * Move the QH from the periodic assi
+ * the periodic queued schedule. * the periodic queued schedule.
+ */ */
+ list_move(&qh->qh_list_entry, list_move(&qh->qh_list_entry,
+ &_hcd->periodic_sched_queued); &_hcd->periodic_sched_queued);
+
+ /* done queuing high bandwidth */ /* done queuing high bandwidth */
+ _hcd->core_if->queuing_high_bandwidth _hcd->core_if->queuing_high_bandwidth
+ } }
+ } }
+ if (!_hcd->core_if->dma_enable) { if (!_hcd->core_if->dma_enable) {
+ dwc_otg_core_global_regs_t * global_regs; dwc_otg_core_global_regs_t * global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0}; gintmsk_data_t intr_mask = {.d32 = 0};
+ global_regs = _hcd->core_if->core_global_regs global_regs = _hcd->core_if->core_global_regs
+ intr_mask.b.ptxfempty = 1; intr_mask.b.ptxfempty = 1;
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxst tx_status.d32 = dwc_read_reg32(&host_regs->hptxst
+ DWC_DEBUGPL(DBG_HCDV," P Tx Req Queue Space DWC_DEBUGPL(DBG_HCDV," P Tx Req Queue Space
+ tx_status.b.ptxqspcavail); tx_status.b.ptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV," P Tx FIFO Space Avail DWC_DEBUGPL(DBG_HCDV," P Tx FIFO Space Avail
+ tx_status.b.ptxfspcavail); tx_status.b.ptxfspcavail);
+
+#endif /* */ #endif /* */
+ if (!(list_empty(&_hcd->periodic_sched_assigned)) if (!(list_empty(&_hcd->periodic_sched_assigned))
+ || no_queue_space || no_fifo_space) { || no_queue_space || no_fifo_space) {
+
+ /* /*
+ * May need to queue more transaction * May need to queue more transaction
+ * queue or Tx FIFO empties. Enable t * queue or Tx FIFO empties. Enable t
+ * FIFO empty interrupt. (Always use * FIFO empty interrupt. (Always use
+ * level to ensure that new requests * level to ensure that new requests
+ * soon as possible.) * soon as possible.)
+ */ */
+ dwc_modify_reg32(&global_regs->gintms dwc_modify_reg32(&global_regs->gintms
+ } else { } else {
+ /* /*
+ * Disable the Tx FIFO empty interrup * Disable the Tx FIFO empty interrup
+ * no more transactions that need to * no more transactions that need to
+ * now. This function is called from * now. This function is called from
+ * handlers to queue more transaction * handlers to queue more transaction
+ * states change. * states change.
+ */ */
+ dwc_modify_reg32(&global_regs->gintms dwc_modify_reg32(&global_regs->gintms
+ } }
+ } }
+} }
+
+
+
+/** /**
+ * This function processes the currently active host channels * This function processes the currently active host channels
+ * transactions for these channels to the DWC_otg controller. * transactions for these channels to the DWC_otg controller.
+ * from HCD interrupt handler functions. * from HCD interrupt handler functions.
+ * *
+ * @param _hcd The HCD state structure. * @param _hcd The HCD state structure.
+ * @param _tr_type The type(s) of transactions to queue (non- * @param _tr_type The type(s) of transactions to queue (non-
+ * periodic, or both). * periodic, or both).
+ */ */
+void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t * _hcd, void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t * _hcd,
+ dwc_otg_transaction_type_ dwc_otg_transaction_type_
+{ {
+
+#ifdef DEBUG_SOF #ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n"); DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n");
+
+#endif /* */ #endif /* */
+ /* Process host channels associated with periodic /* Process host channels associated with periodic
+ if ((_tr_type == DWC_OTG_TRANSACTION_PERIODIC if ((_tr_type == DWC_OTG_TRANSACTION_PERIODIC
+ || _tr_type == DWC_OTG_TRANSACTION_ALL) || _tr_type == DWC_OTG_TRANSACTION_ALL)
+ && !list_empty(&_hcd->periodic_sched_assigned && !list_empty(&_hcd->periodic_sched_assigned
+ process_periodic_channels(_hcd); process_periodic_channels(_hcd);
+ } }
+
+ /* Process host channels associated with non-periodic /* Process host channels associated with non-periodic
+ if ((_tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC if ((_tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC
+ || _tr_type == DWC_OTG_TRANSACTION_ALL)) { || _tr_type == DWC_OTG_TRANSACTION_ALL)) {
+ if (!list_empty(&_hcd->non_periodic_sched_act if (!list_empty(&_hcd->non_periodic_sched_act
+ process_non_periodic_channels(_hcd); process_non_periodic_channels(_hcd);
+ } else { } else {
+ /* /*
+ * Ensure NP Tx FIFO empty interrupt * Ensure NP Tx FIFO empty interrupt
+ * there are no non-periodic transfer * there are no non-periodic transfer
+ */ */
+ gintmsk_data_t gintmsk = {.d32 = 0}; gintmsk_data_t gintmsk = {.d32 = 0};
+ gintmsk.b.nptxfempty = 1; gintmsk.b.nptxfempty = 1;
+ dwc_modify_reg32(&_hcd->core_if->core dwc_modify_reg32(&_hcd->core_if->core
+ } }
+ } }
+} }
+
+/** /**
+ * Sets the final status of an URB and returns it to the devi * Sets the final status of an URB and returns it to the devi
+ * required cleanup of the URB is performed. * required cleanup of the URB is performed.
+ */ */
+void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t * _hcd, struct ur void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t * _hcd, struct ur
+ int _status) int _status)
+__releases(_hcd->lock) __releases(_hcd->lock)
+__acquires(_hcd->lock) __acquires(_hcd->lock)
+{ {
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ DWC_PRINT("%s: urb %p, device %d, ep %d %s, s DWC_PRINT("%s: urb %p, device %d, ep %d %s, s
+ __func__, _urb, usb_pipedevice(_ur __func__, _urb, usb_pipedevice(_ur
+ usb_pipeendpoint(_urb->pipe), usb_pipeendpoint(_urb->pipe),
+ usb_pipein(_urb->pipe) ? "IN" : "O usb_pipein(_urb->pipe) ? "IN" : "O
+ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRON if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRON
+ int i; int i;
+ for (i = 0; i < _urb->number_of_packe for (i = 0; i < _urb->number_of_packe
+ DWC_PRINT(" ISO Desc %d stat DWC_PRINT(" ISO Desc %d stat
+ _urb->iso_frame_de _urb->iso_frame_de
+ } }
+ } }
+ } }
+
+#endif /* */ #endif /* */
+ spin_lock(&_hcd->lock); <
+ _urb->hcpriv = NULL; _urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(_hcd), usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(_hcd),
+ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(_hcd), _urb, <
+ spin_unlock(&_hcd->lock); spin_unlock(&_hcd->lock);
+ > usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(_hcd), _urb,
+ > spin_lock(&_hcd->lock);
+} }
+
+
+/* /*
+ * Returns the Queue Head for an URB. * Returns the Queue Head for an URB.
+ */ */
+dwc_otg_qh_t * dwc_urb_to_qh(struct urb *_urb) dwc_otg_qh_t * dwc_urb_to_qh(struct urb *_urb)
+{ {
+ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(_u struct usb_host_endpoint *ep = dwc_urb_to_endpoint(_u
+ return (dwc_otg_qh_t *) ep->hcpriv; return (dwc_otg_qh_t *) ep->hcpriv;
+} }
+
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+void dwc_print_setup_data(uint8_t * setup) void dwc_print_setup_data(uint8_t * setup)
+{ {
+ int i; int i;
+ if (CHK_DEBUG_LEVEL(DBG_HCD)) { if (CHK_DEBUG_LEVEL(DBG_HCD)) {
+ DWC_PRINT("Setup Data = MSB "); DWC_PRINT("Setup Data = MSB ");
+ for (i = 7; i >= 0; i--) for (i = 7; i >= 0; i--)
+ DWC_PRINT("%02x ", setup[i]); DWC_PRINT("%02x ", setup[i]);
+ DWC_PRINT("\n"); DWC_PRINT("\n");
+ DWC_PRINT(" bmRequestType Tranfer = %s\n", DWC_PRINT(" bmRequestType Tranfer = %s\n",
+ (setup[0] & 0x80) ? "Device-to-Hos (setup[0] & 0x80) ? "Device-to-Hos
+ "Host-to-Device"); "Host-to-Device");
+ DWC_PRINT(" bmRequestType Type = "); DWC_PRINT(" bmRequestType Type = ");
+ switch ((setup[0] & 0x60) >> 5) { switch ((setup[0] & 0x60) >> 5) {
+ case 0: case 0:
+ DWC_PRINT("Standard\n"); DWC_PRINT("Standard\n");
+ break; break;
+ case 1: case 1:
+ DWC_PRINT("Class\n"); DWC_PRINT("Class\n");
+ break; break;
+ case 2: case 2:
+ DWC_PRINT("Vendor\n"); DWC_PRINT("Vendor\n");
+ break; break;
+ case 3: case 3:
+ DWC_PRINT("Reserved\n"); DWC_PRINT("Reserved\n");
+ break; break;
+ } }
+ DWC_PRINT(" bmRequestType Recipient = "); DWC_PRINT(" bmRequestType Recipient = ");
+ switch (setup[0] & 0x1f) { switch (setup[0] & 0x1f) {
+ case 0: case 0:
+ DWC_PRINT("Device\n"); DWC_PRINT("Device\n");
+ break; break;
+ case 1: case 1:
+ DWC_PRINT("Interface\n"); DWC_PRINT("Interface\n");
+ break; break;
+ case 2: case 2:
+ DWC_PRINT("Endpoint\n"); DWC_PRINT("Endpoint\n");
+ break; break;
+ case 3: case 3:
+ DWC_PRINT("Other\n"); DWC_PRINT("Other\n");
+ break; break;
+ default: default:
+ DWC_PRINT("Reserved\n"); DWC_PRINT("Reserved\n");
+ break; break;
+ } }
+ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]); DWC_PRINT(" bRequest = 0x%0x\n", setup[1]);
+ DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t * DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *
+ DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t * DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *
+ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_
+ } }
+} }
+
+
+#endif /* */ #endif /* */
+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t * _hcd) void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t * _hcd)
+{ {
+
+/* /*
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ DWC_PRINT("Frame remaining at SOF:\n"); DWC_PRINT("Frame remaining at SOF:\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->frrem_samples, _hcd->frrem_accum, _hcd->frrem_samples, _hcd->frrem_accum,
+ (_hcd->frrem_samples > 0) ? (_hcd->frrem_samples > 0) ?
+ _hcd->frrem_accum/_hcd->frrem_samples : 0); _hcd->frrem_accum/_hcd->frrem_samples : 0);
+
+ DWC_PRINT("\n"); DWC_PRINT("\n");
+ DWC_PRINT("Frame remaining at start_transfer (uframe DWC_PRINT("Frame remaining at start_transfer (uframe
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->core_if->hfnum_7_samples, _hcd->core_ _hcd->core_if->hfnum_7_samples, _hcd->core_
+ (_hcd->core_if->hfnum_7_samples > 0) ? (_hcd->core_if->hfnum_7_samples > 0) ?
+ _hcd->core_if->hfnum_7_frrem_accum/_hcd->co _hcd->core_if->hfnum_7_frrem_accum/_hcd->co
+ DWC_PRINT("Frame remaining at start_transfer (uframe DWC_PRINT("Frame remaining at start_transfer (uframe
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->core_if->hfnum_0_samples, _hcd->core_ _hcd->core_if->hfnum_0_samples, _hcd->core_
+ (_hcd->core_if->hfnum_0_samples > 0) ? (_hcd->core_if->hfnum_0_samples > 0) ?
+ _hcd->core_if->hfnum_0_frrem_accum/_hcd->co _hcd->core_if->hfnum_0_frrem_accum/_hcd->co
+ DWC_PRINT("Frame remaining at start_transfer (uframe DWC_PRINT("Frame remaining at start_transfer (uframe
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->core_if->hfnum_other_samples, _hcd->c _hcd->core_if->hfnum_other_samples, _hcd->c
+ (_hcd->core_if->hfnum_other_samples > 0) ? (_hcd->core_if->hfnum_other_samples > 0) ?
+ _hcd->core_if->hfnum_other_frrem_accum/_hcd _hcd->core_if->hfnum_other_frrem_accum/_hcd
+
+ DWC_PRINT("\n"); DWC_PRINT("\n");
+ DWC_PRINT("Frame remaining at sample point A (uframe DWC_PRINT("Frame remaining at sample point A (uframe
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_7_samples_a, _hcd->hfnum_7_frre _hcd->hfnum_7_samples_a, _hcd->hfnum_7_frre
+ (_hcd->hfnum_7_samples_a > 0) ? (_hcd->hfnum_7_samples_a > 0) ?
+ _hcd->hfnum_7_frrem_accum_a/_hcd->hfnum_7_s _hcd->hfnum_7_frrem_accum_a/_hcd->hfnum_7_s
+ DWC_PRINT("Frame remaining at sample point A (uframe DWC_PRINT("Frame remaining at sample point A (uframe
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_0_samples_a, _hcd->hfnum_0_frre _hcd->hfnum_0_samples_a, _hcd->hfnum_0_frre
+ (_hcd->hfnum_0_samples_a > 0) ? (_hcd->hfnum_0_samples_a > 0) ?
+ _hcd->hfnum_0_frrem_accum_a/_hcd->hfnum_0_s _hcd->hfnum_0_frrem_accum_a/_hcd->hfnum_0_s
+ DWC_PRINT("Frame remaining at sample point A (uframe DWC_PRINT("Frame remaining at sample point A (uframe
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_other_samples_a, _hcd->hfnum_ot _hcd->hfnum_other_samples_a, _hcd->hfnum_ot
+ (_hcd->hfnum_other_samples_a > 0) ? (_hcd->hfnum_other_samples_a > 0) ?
+ _hcd->hfnum_other_frrem_accum_a/_hcd->hfnum _hcd->hfnum_other_frrem_accum_a/_hcd->hfnum
+
+ DWC_PRINT("\n"); DWC_PRINT("\n");
+ DWC_PRINT("Frame remaining at sample point B (uframe DWC_PRINT("Frame remaining at sample point B (uframe
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_7_samples_b, _hcd->hfnum_7_frre _hcd->hfnum_7_samples_b, _hcd->hfnum_7_frre
+ (_hcd->hfnum_7_samples_b > 0) ? (_hcd->hfnum_7_samples_b > 0) ?
+ _hcd->hfnum_7_frrem_accum_b/_hcd->hfnum_7_s _hcd->hfnum_7_frrem_accum_b/_hcd->hfnum_7_s
+ DWC_PRINT("Frame remaining at sample point B (uframe DWC_PRINT("Frame remaining at sample point B (uframe
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_0_samples_b, _hcd->hfnum_0_frre _hcd->hfnum_0_samples_b, _hcd->hfnum_0_frre
+ (_hcd->hfnum_0_samples_b > 0) ? (_hcd->hfnum_0_samples_b > 0) ?
+ _hcd->hfnum_0_frrem_accum_b/_hcd->hfnum_0_s _hcd->hfnum_0_frrem_accum_b/_hcd->hfnum_0_s
+ DWC_PRINT("Frame remaining at sample point B (uframe DWC_PRINT("Frame remaining at sample point B (uframe
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_other_samples_b, _hcd->hfnum_ot _hcd->hfnum_other_samples_b, _hcd->hfnum_ot
+ (_hcd->hfnum_other_samples_b > 0) ? (_hcd->hfnum_other_samples_b > 0) ?
+ _hcd->hfnum_other_frrem_accum_b/_hcd->hfnum _hcd->hfnum_other_frrem_accum_b/_hcd->hfnum
+#endif #endif
+*/ */
+} void dwc_otg_hcd_dump_state(dwc_otg_hcd_t * _hcd) } void dwc_otg_hcd_dump_state(dwc_otg_hcd_t * _hcd)
+{ {
+
+#ifdef CONFIG_DWC_DEBUG | #ifdef DEBUG
+ int num_channels; int num_channels;
+ int i; int i;
+ gnptxsts_data_t np_tx_status; gnptxsts_data_t np_tx_status;
+ hptxsts_data_t p_tx_status; hptxsts_data_t p_tx_status;
+ num_channels = _hcd->core_if->core_params->host_chann num_channels = _hcd->core_if->core_params->host_chann
+ DWC_PRINT("\n"); DWC_PRINT("\n");
+ DWC_PRINT DWC_PRINT
+ ("*********************************************** ("***********************************************
+ DWC_PRINT("HCD State:\n"); DWC_PRINT("HCD State:\n");
+ DWC_PRINT(" Num channels: %d\n", num_channels); DWC_PRINT(" Num channels: %d\n", num_channels);
+ for (i = 0; i < num_channels; i++) { for (i = 0; i < num_channels; i++) {
+ dwc_hc_t * hc = _hcd->hc_ptr_array[i]; dwc_hc_t * hc = _hcd->hc_ptr_array[i];
+ DWC_PRINT(" Channel %d:\n", i); DWC_PRINT(" Channel %d:\n", i);
+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_i DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_i
+ hc->dev_addr, hc->ep_num, hc->ep_i hc->dev_addr, hc->ep_num, hc->ep_i
+ DWC_PRINT(" speed: %d\n", hc->speed); DWC_PRINT(" speed: %d\n", hc->speed);
+ DWC_PRINT(" ep_type: %d\n", hc->ep_type); DWC_PRINT(" ep_type: %d\n", hc->ep_type);
+ DWC_PRINT(" max_packet: %d\n", hc->max_pac DWC_PRINT(" max_packet: %d\n", hc->max_pac
+ DWC_PRINT(" data_pid_start: %d\n", hc->dat DWC_PRINT(" data_pid_start: %d\n", hc->dat
+ DWC_PRINT(" multi_count: %d\n", hc->multi_ DWC_PRINT(" multi_count: %d\n", hc->multi_
+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_ DWC_PRINT(" xfer_started: %d\n", hc->xfer_
+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buf DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buf
+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len) DWC_PRINT(" xfer_len: %d\n", hc->xfer_len)
+ DWC_PRINT(" xfer_count: %d\n", hc->xfer_co DWC_PRINT(" xfer_count: %d\n", hc->xfer_co
+ DWC_PRINT(" halt_on_queue: %d\n", hc->halt DWC_PRINT(" halt_on_queue: %d\n", hc->halt
+ DWC_PRINT(" halt_pending: %d\n", hc->halt_ DWC_PRINT(" halt_pending: %d\n", hc->halt_
+ DWC_PRINT(" halt_status: %d\n", hc->halt_s DWC_PRINT(" halt_status: %d\n", hc->halt_s
+ DWC_PRINT(" do_split: %d\n", hc->do_split) DWC_PRINT(" do_split: %d\n", hc->do_split)
+ DWC_PRINT(" complete_split: %d\n", hc->com DWC_PRINT(" complete_split: %d\n", hc->com
+ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr) DWC_PRINT(" hub_addr: %d\n", hc->hub_addr)
+ DWC_PRINT(" port_addr: %d\n", hc->port_add DWC_PRINT(" port_addr: %d\n", hc->port_add
+ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos) DWC_PRINT(" xact_pos: %d\n", hc->xact_pos)
+ DWC_PRINT(" requests: %d\n", hc->requests) DWC_PRINT(" requests: %d\n", hc->requests)
+ DWC_PRINT(" qh: %p\n", hc->qh); DWC_PRINT(" qh: %p\n", hc->qh);
+ if (hc->xfer_started) { if (hc->xfer_started) {
+ hfnum_data_t hfnum; hfnum_data_t hfnum;
+ hcchar_data_t hcchar; hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ hcint_data_t hcint; hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk; hcintmsk_data_t hcintmsk;
+ hfnum.d32 = hfnum.d32 =
+ dwc_read_reg32(&_hcd->core_if->ho dwc_read_reg32(&_hcd->core_if->ho
+ host_global_regs-> host_global_regs->
+ hcchar.d32 = hcchar.d32 =
+ dwc_read_reg32(&_hcd->core_if->ho dwc_read_reg32(&_hcd->core_if->ho
+ hcchar); hcchar);
+ hctsiz.d32 = hctsiz.d32 =
+ dwc_read_reg32(&_hcd->core_if->ho dwc_read_reg32(&_hcd->core_if->ho
+ hctsiz); hctsiz);
+ hcint.d32 = hcint.d32 =
+ dwc_read_reg32(&_hcd->core_if->ho dwc_read_reg32(&_hcd->core_if->ho
+ hcint); hcint);
+ hcintmsk.d32 = hcintmsk.d32 =
+ dwc_read_reg32(&_hcd->core_if->ho dwc_read_reg32(&_hcd->core_if->ho
+ hcintmsk); hcintmsk);
+ DWC_PRINT(" hfnum: 0x%08x\n", hfnu DWC_PRINT(" hfnum: 0x%08x\n", hfnu
+ DWC_PRINT(" hcchar: 0x%08x\n", hcc DWC_PRINT(" hcchar: 0x%08x\n", hcc
+ DWC_PRINT(" hctsiz: 0x%08x\n", hct DWC_PRINT(" hctsiz: 0x%08x\n", hct
+ DWC_PRINT(" hcint: 0x%08x\n", hcin DWC_PRINT(" hcint: 0x%08x\n", hcin
+ DWC_PRINT(" hcintmsk: 0x%08x\n", h DWC_PRINT(" hcintmsk: 0x%08x\n", h
+ } }
+ if (hc->xfer_started && (hc->qh != NULL) if (hc->xfer_started && (hc->qh != NULL)
+ && (hc->qh->qtd_in_process != NULL)) { && (hc->qh->qtd_in_process != NULL)) {
+ dwc_otg_qtd_t * qtd; dwc_otg_qtd_t * qtd;
+ struct urb *urb; struct urb *urb;
+ qtd = hc->qh->qtd_in_process; qtd = hc->qh->qtd_in_process;
+ urb = qtd->urb; urb = qtd->urb;
+ DWC_PRINT(" URB Info:\n"); DWC_PRINT(" URB Info:\n");
+ DWC_PRINT(" qtd: %p, urb: %p\n", DWC_PRINT(" qtd: %p, urb: %p\n",
+ if (urb != NULL) { if (urb != NULL) {
+ DWC_PRINT(" Dev: %d, EP: DWC_PRINT(" Dev: %d, EP:
+ usb_pipedevice(urb usb_pipedevice(urb
+ usb_pipeendpoint(u usb_pipeendpoint(u
+ usb_pipein(urb-> usb_pipein(urb->
+ pipe) pipe)
+ DWC_PRINT(" Max packet s DWC_PRINT(" Max packet s
+ usb_maxpacket(urb- usb_maxpacket(urb-
+ usb usb
+
+ DWC_PRINT(" transfer_buf DWC_PRINT(" transfer_buf
+ urb->transfer_buff urb->transfer_buff
+ DWC_PRINT(" transfer_dma DWC_PRINT(" transfer_dma
+ (void *)urb->trans (void *)urb->trans
+ DWC_PRINT(" transfer_buf DWC_PRINT(" transfer_buf
+ urb->transfer_buff urb->transfer_buff
+ DWC_PRINT(" actual_lengt DWC_PRINT(" actual_lengt
+ urb->actual_length urb->actual_length
+ } }
+ } }
+ } | } DWC_PRINT(" non_periodic_channels: %d\n",
+ > _hcd->non_periodic_channels);
+ > DWC_PRINT(" periodic_channels: %d\n", _hcd->periodic
+ DWC_PRINT(" periodic_usecs: %d\n", _hcd->periodic_us DWC_PRINT(" periodic_usecs: %d\n", _hcd->periodic_us
+ np_tx_status.d32 = np_tx_status.d32 =
+ dwc_read_reg32(&_hcd->core_if->core_global_regs-> dwc_read_reg32(&_hcd->core_if->core_global_regs->
+ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n",
+ np_tx_status.b.nptxqspcavail); np_tx_status.b.nptxqspcavail);
+ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", DWC_PRINT(" NP Tx FIFO Space Avail: %d\n",
+ np_tx_status.b.nptxfspcavail); np_tx_status.b.nptxfspcavail);
+ p_tx_status.d32 = p_tx_status.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->host_glob dwc_read_reg32(&_hcd->core_if->host_if->host_glob
+ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", DWC_PRINT(" P Tx Req Queue Space Avail: %d\n",
+ p_tx_status.b.ptxqspcavail); p_tx_status.b.ptxqspcavail);
+ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_statu DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_statu
+ dwc_otg_hcd_dump_frrem(_hcd); dwc_otg_hcd_dump_frrem(_hcd);
+ dwc_otg_dump_global_registers(_hcd->core_if); dwc_otg_dump_global_registers(_hcd->core_if);
+ dwc_otg_dump_host_registers(_hcd->core_if); dwc_otg_dump_host_registers(_hcd->core_if);
+ DWC_PRINT DWC_PRINT
+ ("*********************************************** ("***********************************************
+ DWC_PRINT("\n"); DWC_PRINT("\n");
+
+#endif /* */ #endif /* */
+} }
+#endif /* CONFIG_DWC_DEVICE_ONLY */ | #endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd.h.sdiff b/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd.h.sdiff
new file mode 100644
index 00000000000..b4932c6465c
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd.h.sdiff
@@ -0,0 +1,675 @@
+/* ========================================================== /* ==========================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers
+ * $Revision: #6 $ * $Revision: #6 $
+ * $Date: 2006/12/05 $ * $Date: 2006/12/05 $
+ * $Change: 762293 $ * $Change: 762293 $
+ * *
+ * Synopsys HS OTG Linux Software Driver and documentation (h * Synopsys HS OTG Linux Software Driver and documentation (h
+ * "Software") is an Unsupported proprietary work of Synopsys * "Software") is an Unsupported proprietary work of Synopsys
+ * otherwise expressly agreed to in writing between Synopsys * otherwise expressly agreed to in writing between Synopsys
+ * *
+ * The Software IS NOT an item of Licensed Software or Licens * The Software IS NOT an item of Licensed Software or Licens
+ * any End User Software License Agreement or Agreement for L * any End User Software License Agreement or Agreement for L
+ * with Synopsys or any supplement thereto. You are permitted * with Synopsys or any supplement thereto. You are permitted
+ * redistribute this Software in source and binary forms, wit * redistribute this Software in source and binary forms, wit
+ * modification, provided that redistributions of source code * modification, provided that redistributions of source code
+ * notice. You may not view, use, disclose, copy or distribut * notice. You may not view, use, disclose, copy or distribut
+ * any information contained herein except pursuant to this l * any information contained herein except pursuant to this l
+ * Synopsys. If you do not agree with this notice, including * Synopsys. If you do not agree with this notice, including
+ * below, then you are not authorized to use the Software. * below, then you are not authorized to use the Software.
+ * *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON A * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON A
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PA * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PA
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABL * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABL
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTER * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARI * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARI
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE PO * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE PO
+ * DAMAGE. * DAMAGE.
+ * ========================================================== * ==========================================================
+#ifndef CONFIG_DWC_DEVICE_ONLY #ifndef CONFIG_DWC_DEVICE_ONLY
+#if !defined(__DWC_HCD_H__) #if !defined(__DWC_HCD_H__)
+#define __DWC_HCD_H__ #define __DWC_HCD_H__
+
+#include <linux/list.h> #include <linux/list.h>
+#include <linux/usb.h> #include <linux/usb.h>
+#include <linux/usb/hcd.h> | #include <../drivers/usb/core/hcd.h>
+
+struct lm_device; struct lm_device;
+struct dwc_otg_device; struct dwc_otg_device;
+
+#include "dwc_otg_cil.h" #include "dwc_otg_cil.h"
+
+/** /**
+ * @file * @file
+ * *
+ * This file contains the structures, constants, and interfac * This file contains the structures, constants, and interfac
+ * the Host Contoller Driver (HCD). * the Host Contoller Driver (HCD).
+ * *
+ * The Host Controller Driver (HCD) is responsible for transl * The Host Controller Driver (HCD) is responsible for transl
+ * from the USB Driver into the appropriate actions on the DW * from the USB Driver into the appropriate actions on the DW
+ * It isolates the USBD from the specifics of the controller * It isolates the USBD from the specifics of the controller
+ * API to the USBD. * API to the USBD.
+ */ */
+
+/** /**
+ * Phases for control transfers. * Phases for control transfers.
+ */ */
+typedef enum dwc_otg_control_phase { typedef enum dwc_otg_control_phase {
+ DWC_OTG_CONTROL_SETUP, DWC_OTG_CONTROL_SETUP,
+ DWC_OTG_CONTROL_DATA, DWC_OTG_CONTROL_DATA,
+ DWC_OTG_CONTROL_STATUS DWC_OTG_CONTROL_STATUS
+} dwc_otg_control_phase_e; } dwc_otg_control_phase_e;
+
+/** Transaction types. */ /** Transaction types. */
+typedef enum dwc_otg_transaction_type { typedef enum dwc_otg_transaction_type {
+ DWC_OTG_TRANSACTION_NONE, DWC_OTG_TRANSACTION_NONE,
+ DWC_OTG_TRANSACTION_PERIODIC, DWC_OTG_TRANSACTION_PERIODIC,
+ DWC_OTG_TRANSACTION_NON_PERIODIC, DWC_OTG_TRANSACTION_NON_PERIODIC,
+ DWC_OTG_TRANSACTION_ALL DWC_OTG_TRANSACTION_ALL
+} dwc_otg_transaction_type_e; } dwc_otg_transaction_type_e;
+
+/** /**
+ * A Queue Transfer Descriptor (QTD) holds the state of a bul * A Queue Transfer Descriptor (QTD) holds the state of a bul
+ * interrupt, or isochronous transfer. A single QTD is create * interrupt, or isochronous transfer. A single QTD is create
+ * (of one of these types) submitted to the HCD. The transfer * (of one of these types) submitted to the HCD. The transfer
+ * a QTD may require one or multiple transactions. * a QTD may require one or multiple transactions.
+ * *
+ * A QTD is linked to a Queue Head, which is entered in eithe * A QTD is linked to a Queue Head, which is entered in eithe
+ * non-periodic or periodic schedule for execution. When a QT * non-periodic or periodic schedule for execution. When a QT
+ * execution, some or all of its transactions may be executed * execution, some or all of its transactions may be executed
+ * execution, the state of the QTD is updated. The QTD may be * execution, the state of the QTD is updated. The QTD may be
+ * its transactions are complete or if an error occurred. Oth * its transactions are complete or if an error occurred. Oth
+ * remains in the schedule so more transactions can be execut * remains in the schedule so more transactions can be execut
+ */ */
+
+struct dwc_otg_qh; struct dwc_otg_qh;
+
+typedef struct dwc_otg_qtd { typedef struct dwc_otg_qtd {
+ /** /**
+ * Determines the PID of the next data packet for the * Determines the PID of the next data packet for the
+ * control transfers. Ignored for other transfer type * control transfers. Ignored for other transfer type
+ * One of the following values: * One of the following values:
+ * - DWC_OTG_HC_PID_DATA0 * - DWC_OTG_HC_PID_DATA0
+ * - DWC_OTG_HC_PID_DATA1 * - DWC_OTG_HC_PID_DATA1
+ */ */
+ uint8_t data_toggle; uint8_t data_toggle;
+
+ /** Current phase for control transfers (Setup, Data, /** Current phase for control transfers (Setup, Data,
+ dwc_otg_control_phase_e control_phase; dwc_otg_control_phase_e control_phase;
+
+ /** Keep track of the current split type /** Keep track of the current split type
+ * for FS/LS endpoints on a HS Hub */ * for FS/LS endpoints on a HS Hub */
+ uint8_t complete_split; uint8_t complete_split;
+
+ /** How many bytes transferred during SSPLIT OUT */ /** How many bytes transferred during SSPLIT OUT */
+ uint32_t ssplit_out_xfer_count; uint32_t ssplit_out_xfer_count;
+
+ /** /**
+ * Holds the number of bus errors that have occurred * Holds the number of bus errors that have occurred
+ * within this transfer. * within this transfer.
+ */ */
+ uint8_t error_count; uint8_t error_count;
+
+ /** /**
+ * Index of the next frame descriptor for an isochron * Index of the next frame descriptor for an isochron
+ * frame descriptor describes the buffer position and * frame descriptor describes the buffer position and
+ * data to be transferred in the next scheduled (micr * data to be transferred in the next scheduled (micr
+ * isochronous transfer. It also holds status for tha * isochronous transfer. It also holds status for tha
+ * The frame index starts at 0. * The frame index starts at 0.
+ */ */
+ int isoc_frame_index; int isoc_frame_index;
+
+ /** Position of the ISOC split on full/low speed */ /** Position of the ISOC split on full/low speed */
+ uint8_t isoc_split_pos; uint8_t isoc_split_pos;
+
+ /** Position of the ISOC split in the buffer for the /** Position of the ISOC split in the buffer for the
+ uint16_t isoc_split_offset; uint16_t isoc_split_offset;
+
+ /** URB for this transfer */ /** URB for this transfer */
+ struct urb *urb; struct urb *urb;
+
+ /** This list of QTDs */ /** This list of QTDs */
+ struct list_head qtd_list_entry; struct list_head qtd_list_entry;
+
+ /* Field to track the qh pointer */ /* Field to track the qh pointer */
+ struct dwc_otg_qh *qtd_qh_ptr; struct dwc_otg_qh *qtd_qh_ptr;
+} dwc_otg_qtd_t; } dwc_otg_qtd_t;
+
+/** /**
+ * A Queue Head (QH) holds the static characteristics of an e * A Queue Head (QH) holds the static characteristics of an e
+ * maintains a list of transfers (QTDs) for that endpoint. A * maintains a list of transfers (QTDs) for that endpoint. A
+ * be entered in either the non-periodic or periodic schedule * be entered in either the non-periodic or periodic schedule
+ */ */
+typedef struct dwc_otg_qh { typedef struct dwc_otg_qh {
+ /** /**
+ * Endpoint type. * Endpoint type.
+ * One of the following values: * One of the following values:
+ * - USB_ENDPOINT_XFER_CONTROL * - USB_ENDPOINT_XFER_CONTROL
+ * - USB_ENDPOINT_XFER_ISOC * - USB_ENDPOINT_XFER_ISOC
+ * - USB_ENDPOINT_XFER_BULK * - USB_ENDPOINT_XFER_BULK
+ * - USB_ENDPOINT_XFER_INT * - USB_ENDPOINT_XFER_INT
+ */ */
+ uint8_t ep_type; uint8_t ep_type;
+ uint8_t ep_is_in; uint8_t ep_is_in;
+
+ /** wMaxPacketSize Field of Endpoint Descriptor. */ /** wMaxPacketSize Field of Endpoint Descriptor. */
+ uint16_t maxp; uint16_t maxp;
+
+ /** /**
+ * Determines the PID of the next data packet for non * Determines the PID of the next data packet for non
+ * transfers. Ignored for control transfers.<br> * transfers. Ignored for control transfers.<br>
+ * One of the following values: * One of the following values:
+ * - DWC_OTG_HC_PID_DATA0 * - DWC_OTG_HC_PID_DATA0
+ * - DWC_OTG_HC_PID_DATA1 * - DWC_OTG_HC_PID_DATA1
+ */ */
+ uint8_t data_toggle; uint8_t data_toggle;
+
+ /** Ping state if 1. */ /** Ping state if 1. */
+ uint8_t ping_state; uint8_t ping_state;
+
+ /** /**
+ * List of QTDs for this QH. * List of QTDs for this QH.
+ */ */
+ struct list_head qtd_list; struct list_head qtd_list;
+
+ /** Host channel currently processing transfers for t /** Host channel currently processing transfers for t
+ dwc_hc_t *channel; dwc_hc_t *channel;
+
+ /** QTD currently assigned to a host channel for this /** QTD currently assigned to a host channel for this
+ dwc_otg_qtd_t *qtd_in_process; dwc_otg_qtd_t *qtd_in_process;
+
+ /** Full/low speed endpoint on high-speed hub require /** Full/low speed endpoint on high-speed hub require
+ uint8_t do_split; uint8_t do_split;
+
+ /** @name Periodic schedule information */ /** @name Periodic schedule information */
+ /** @{ */ /** @{ */
+
+ /** Bandwidth in microseconds per (micro)frame. */ /** Bandwidth in microseconds per (micro)frame. */
+ uint8_t usecs; uint8_t usecs;
+
+ /** Interval between transfers in (micro)frames. */ /** Interval between transfers in (micro)frames. */
+ uint16_t interval; uint16_t interval;
+
+ /** /**
+ * (micro)frame to initialize a periodic transfer. Th * (micro)frame to initialize a periodic transfer. Th
+ * executes in the following (micro)frame. * executes in the following (micro)frame.
+ */ */
+ uint16_t sched_frame; uint16_t sched_frame;
+
+ /** (micro)frame at which last start split was initia /** (micro)frame at which last start split was initia
+ uint16_t start_split_frame; uint16_t start_split_frame;
+
+ /** @} */ /** @} */
+
+ uint16_t speed; <
+ uint16_t frame_usecs[8]; <
+ <
+ /** Entry for QH in either the periodic or non-period /** Entry for QH in either the periodic or non-period
+ struct list_head qh_list_entry; struct list_head qh_list_entry;
+} dwc_otg_qh_t; } dwc_otg_qh_t;
+
+/** /**
+ * This structure holds the state of the HCD, including the n * This structure holds the state of the HCD, including the n
+ * periodic schedules. * periodic schedules.
+ */ */
+typedef struct dwc_otg_hcd { typedef struct dwc_otg_hcd {
+
+ spinlock_t lock; spinlock_t lock;
+
+ /** DWC OTG Core Interface Layer */ /** DWC OTG Core Interface Layer */
+ dwc_otg_core_if_t *core_if; dwc_otg_core_if_t *core_if;
+
+ /** Internal DWC HCD Flags */ /** Internal DWC HCD Flags */
+ volatile union dwc_otg_hcd_internal_flags { volatile union dwc_otg_hcd_internal_flags {
+ uint32_t d32; uint32_t d32;
+ struct { struct {
+ unsigned port_connect_status_change : unsigned port_connect_status_change :
+ unsigned port_connect_status : 1; unsigned port_connect_status : 1;
+ unsigned port_reset_change : 1; unsigned port_reset_change : 1;
+ unsigned port_enable_change : 1; unsigned port_enable_change : 1;
+ unsigned port_suspend_change : 1; unsigned port_suspend_change : 1;
+ unsigned port_over_current_change : 1 unsigned port_over_current_change : 1
+ unsigned reserved : 27; unsigned reserved : 27;
+ } b; } b;
+ } flags; } flags;
+
+ /** /**
+ * Inactive items in the non-periodic schedule. This * Inactive items in the non-periodic schedule. This
+ * Queue Heads. Transfers associated with these Queue * Queue Heads. Transfers associated with these Queue
+ * currently assigned to a host channel. * currently assigned to a host channel.
+ */ */
+ struct list_head non_periodic_sched_inactive; struct list_head non_periodic_sched_inactive;
+
+ /** /**
+ * Deferred items in the non-periodic schedule. This * Deferred items in the non-periodic schedule. This
+ * Queue Heads. Transfers associated with these Queue * Queue Heads. Transfers associated with these Queue
+ * currently assigned to a host channel. * currently assigned to a host channel.
+ * When we get an NAK, the QH goes here. * When we get an NAK, the QH goes here.
+ */ */
+ struct list_head non_periodic_sched_deferred; struct list_head non_periodic_sched_deferred;
+
+ /** /**
+ * Active items in the non-periodic schedule. This is * Active items in the non-periodic schedule. This is
+ * Queue Heads. Transfers associated with these Queue * Queue Heads. Transfers associated with these Queue
+ * currently assigned to a host channel. * currently assigned to a host channel.
+ */ */
+ struct list_head non_periodic_sched_active; struct list_head non_periodic_sched_active;
+
+ /** /**
+ * Pointer to the next Queue Head to process in the a * Pointer to the next Queue Head to process in the a
+ * non-periodic schedule. * non-periodic schedule.
+ */ */
+ struct list_head *non_periodic_qh_ptr; struct list_head *non_periodic_qh_ptr;
+
+ /** /**
+ * Inactive items in the periodic schedule. This is a * Inactive items in the periodic schedule. This is a
+ * periodic transfers that are _not_ scheduled for th * periodic transfers that are _not_ scheduled for th
+ * Each QH in the list has an interval counter that d * Each QH in the list has an interval counter that d
+ * needs to be scheduled for execution. This scheduli * needs to be scheduled for execution. This scheduli
+ * allows only a simple calculation for periodic band * allows only a simple calculation for periodic band
+ * must assume that all periodic transfers may need t * must assume that all periodic transfers may need t
+ * same frame). However, it greatly simplifies schedu * same frame). However, it greatly simplifies schedu
+ * be sufficient for the vast majority of OTG hosts, * be sufficient for the vast majority of OTG hosts,
+ * connect to a small number of peripherals at one ti * connect to a small number of peripherals at one ti
+ * *
+ * Items move from this list to periodic_sched_ready * Items move from this list to periodic_sched_ready
+ * interval counter is 0 at SOF. * interval counter is 0 at SOF.
+ */ */
+ struct list_head periodic_sched_inactive; struct list_head periodic_sched_inactive;
+
+ /** /**
+ * List of periodic QHs that are ready for execution * List of periodic QHs that are ready for execution
+ * frame, but have not yet been assigned to host chan * frame, but have not yet been assigned to host chan
+ * *
+ * Items move from this list to periodic_sched_assign * Items move from this list to periodic_sched_assign
+ * channels become available during the current frame * channels become available during the current frame
+ */ */
+ struct list_head periodic_sched_ready; struct list_head periodic_sched_ready;
+
+ /** /**
+ * List of periodic QHs to be executed in the next fr * List of periodic QHs to be executed in the next fr
+ * assigned to host channels. * assigned to host channels.
+ * *
+ * Items move from this list to periodic_sched_queued * Items move from this list to periodic_sched_queued
+ * transactions for the QH are queued to the DWC_otg * transactions for the QH are queued to the DWC_otg
+ */ */
+ struct list_head periodic_sched_assigned; struct list_head periodic_sched_assigned;
+
+ /** /**
+ * List of periodic QHs that have been queued for exe * List of periodic QHs that have been queued for exe
+ * *
+ * Items move from this list to either periodic_sched * Items move from this list to either periodic_sched
+ * periodic_sched_ready when the channel associated w * periodic_sched_ready when the channel associated w
+ * is released. If the interval for the QH is 1, the * is released. If the interval for the QH is 1, the
+ * periodic_sched_ready because it must be reschedule * periodic_sched_ready because it must be reschedule
+ * frame. Otherwise, the item moves to periodic_sched * frame. Otherwise, the item moves to periodic_sched
+ */ */
+ struct list_head periodic_sched_queued; struct list_head periodic_sched_queued;
+
+ /** /**
+ * Total bandwidth claimed so far for periodic transf * Total bandwidth claimed so far for periodic transf
+ * is in microseconds per (micro)frame. The assumptio * is in microseconds per (micro)frame. The assumptio
+ * periodic transfers may occur in the same (micro)fr * periodic transfers may occur in the same (micro)fr
+ */ */
+ uint16_t periodic_usecs; uint16_t periodic_usecs;
+
+ /** <
+ * Total bandwidth claimed so far for all periodic tr <
+ * in a frame. <
+ * This will include a mixture of HS and FS transfers <
+ * Units are microseconds per (micro)frame. <
+ * We have a budget per frame and have to schedule <
+ * transactions accordingly. <
+ * Watch out for the fact that things are actually sc <
+ * "next frame". <
+ */ <
+ uint16_t frame_usecs[8]; <
+ <
+ /** /**
+ * Frame number read from the core at SOF. The value * Frame number read from the core at SOF. The value
+ * DWC_HFNUM_MAX_FRNUM. * DWC_HFNUM_MAX_FRNUM.
+ */ */
+ uint16_t frame_number; uint16_t frame_number;
+
+ /** /**
+ * Free host channels in the controller. This is a li * Free host channels in the controller. This is a li
+ * dwc_hc_t items. * dwc_hc_t items.
+ */ */
+ struct list_head free_hc_list; struct list_head free_hc_list;
+
+ /** /**
+ * Number of available host channels. | * Number of host channels assigned to periodic trans
+ > * assuming that there is a dedicated host channel fo
+ > * transaction and at least one host channel availabl
+ > * non-periodic transactions.
+ > */
+ > int periodic_channels;
+ >
+ > /**
+ > * Number of host channels assigned to non-periodic t
+ */ */
+ int available_host_channels; | int non_periodic_channels;
+
+ /** /**
+ * Array of pointers to the host channel descriptors. * Array of pointers to the host channel descriptors.
+ * a host channel descriptor given the host channel n * a host channel descriptor given the host channel n
+ * useful in interrupt handlers. * useful in interrupt handlers.
+ */ */
+ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNEL dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNEL
+
+ /** /**
+ * Buffer to use for any data received during the sta * Buffer to use for any data received during the sta
+ * control transfer. Normally no data is transferred * control transfer. Normally no data is transferred
+ * phase. This buffer is used as a bit bucket. * phase. This buffer is used as a bit bucket.
+ */ */
+ uint8_t *status_buf; uint8_t *status_buf;
+
+ /** /**
+ * DMA address for status_buf. * DMA address for status_buf.
+ */ */
+ dma_addr_t status_buf_dma; dma_addr_t status_buf_dma;
+#define DWC_OTG_HCD_STATUS_BUF_SIZE 64 #define DWC_OTG_HCD_STATUS_BUF_SIZE 64
+
+ /** /**
+ * Structure to allow starting the HCD in a non-inter * Structure to allow starting the HCD in a non-inter
+ * during an OTG role change. * during an OTG role change.
+ */ */
+ struct work_struct start_work; struct work_struct start_work;
+ struct usb_hcd *_p; struct usb_hcd *_p;
+
+ /** /**
+ * Connection timer. An OTG host must display a messa * Connection timer. An OTG host must display a messa
+ * does not connect. Started when the VBus power is t * does not connect. Started when the VBus power is t
+ * sysfs attribute "buspower". * sysfs attribute "buspower".
+ */ */
+ struct timer_list conn_timer; struct timer_list conn_timer;
+
+ /* Tasket to do a reset */ /* Tasket to do a reset */
+ struct tasklet_struct *reset_tasklet; struct tasklet_struct *reset_tasklet;
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+ uint32_t frrem_samples; uint32_t frrem_samples;
+ uint64_t frrem_accum; uint64_t frrem_accum;
+
+ uint32_t hfnum_7_samples_a; uint32_t hfnum_7_samples_a;
+ uint64_t hfnum_7_frrem_accum_a; uint64_t hfnum_7_frrem_accum_a;
+ uint32_t hfnum_0_samples_a; uint32_t hfnum_0_samples_a;
+ uint64_t hfnum_0_frrem_accum_a; uint64_t hfnum_0_frrem_accum_a;
+ uint32_t hfnum_other_samples_a; uint32_t hfnum_other_samples_a;
+ uint64_t hfnum_other_frrem_accum_a; uint64_t hfnum_other_frrem_accum_a;
+
+ uint32_t hfnum_7_samples_b; uint32_t hfnum_7_samples_b;
+ uint64_t hfnum_7_frrem_accum_b; uint64_t hfnum_7_frrem_accum_b;
+ uint32_t hfnum_0_samples_b; uint32_t hfnum_0_samples_b;
+ uint64_t hfnum_0_frrem_accum_b; uint64_t hfnum_0_frrem_accum_b;
+ uint32_t hfnum_other_samples_b; uint32_t hfnum_other_samples_b;
+ uint64_t hfnum_other_frrem_accum_b; uint64_t hfnum_other_frrem_accum_b;
+#endif #endif
+
+} dwc_otg_hcd_t; } dwc_otg_hcd_t;
+
+/** Gets the dwc_otg_hcd from a struct usb_hcd */ /** Gets the dwc_otg_hcd from a struct usb_hcd */
+static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hc static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hc
+{ {
+ return (dwc_otg_hcd_t *)(hcd->hcd_priv); return (dwc_otg_hcd_t *)(hcd->hcd_priv);
+} }
+
+/** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. */ /** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. */
+static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_ static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_
+{ {
+ return container_of((void *)dwc_otg_hcd, struct usb_h return container_of((void *)dwc_otg_hcd, struct usb_h
+} }
+
+/** @name HCD Create/Destroy Functions */ /** @name HCD Create/Destroy Functions */
+/** @{ */ /** @{ */
+extern int __init dwc_otg_hcd_init(struct device *_dev, dwc_ extern int __init dwc_otg_hcd_init(struct device *_dev, dwc_
+extern void dwc_otg_hcd_remove(struct device *_dev); extern void dwc_otg_hcd_remove(struct device *_dev);
+/** @} */ /** @} */
+
+/** @name Linux HC Driver API Functions */ /** @name Linux HC Driver API Functions */
+/** @{ */ /** @{ */
+
+extern int dwc_otg_hcd_start(struct usb_hcd *hcd); extern int dwc_otg_hcd_start(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_stop(struct usb_hcd *hcd); extern void dwc_otg_hcd_stop(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd); extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_free(struct usb_hcd *hcd); extern void dwc_otg_hcd_free(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb, struct urb *urb,
+ gfp_t mem_flags); gfp_t mem_flags);
+extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
+/* struct usb_host_endpoint * /* struct usb_host_endpoint *
+ struct urb *urb, int statu struct urb *urb, int statu
+extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endp struct usb_host_endp
+extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd); extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd,
+ char *buf); char *buf);
+extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd,
+ u16 typeReq, u16 typeReq,
+ u16 wValue, u16 wValue,
+ u16 wIndex, u16 wIndex,
+ char *buf, char *buf,
+ u16 wLength); u16 wLength);
+
+/** @} */ /** @} */
+
+/** @name Transaction Execution Functions */ /** @name Transaction Execution Functions */
+/** @{ */ /** @{ */
+extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transact extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transact
+extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *_hc extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *_hc
+ dwc_otg_transactio dwc_otg_transactio
+extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, str extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, str
+ int _status); int _status);
+/** @} */ /** @} */
+
+/** @name Interrupt Handler Functions */ /** @name Interrupt Handler Functions */
+/** @{ */ /** @{ */
+extern int32_t dwc_otg_hcd_handle_intr (dwc_otg_hcd_t *_dwc_o extern int32_t dwc_otg_hcd_handle_intr (dwc_otg_hcd_t *_dwc_o
+extern int32_t dwc_otg_hcd_handle_sof_intr (dwc_otg_hcd_t *_d extern int32_t dwc_otg_hcd_handle_sof_intr (dwc_otg_hcd_t *_d
+extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr (dwc extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr (dwc
+extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr (dwc_ extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr (dwc_
+extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (d extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (d
+extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dw extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dw
+extern int32_t dwc_otg_hcd_handle_port_intr (dwc_otg_hcd_t *_ extern int32_t dwc_otg_hcd_handle_port_intr (dwc_otg_hcd_t *_
+extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr
+extern int32_t dwc_otg_hcd_handle_disconnect_intr (dwc_otg_hc extern int32_t dwc_otg_hcd_handle_disconnect_intr (dwc_otg_hc
+extern int32_t dwc_otg_hcd_handle_hc_intr (dwc_otg_hcd_t *_dw extern int32_t dwc_otg_hcd_handle_hc_intr (dwc_otg_hcd_t *_dw
+extern int32_t dwc_otg_hcd_handle_hc_n_intr (dwc_otg_hcd_t *_ extern int32_t dwc_otg_hcd_handle_hc_n_intr (dwc_otg_hcd_t *_
+extern int32_t dwc_otg_hcd_handle_session_req_intr (dwc_otg_h extern int32_t dwc_otg_hcd_handle_session_req_intr (dwc_otg_h
+extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr (dwc_o extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr (dwc_o
+/** @} */ /** @} */
+
+
+/** @name Schedule Queue Functions */ /** @name Schedule Queue Functions */
+/** @{ */ /** @{ */
+
+/* Implemented in dwc_otg_hcd_queue.c */ /* Implemented in dwc_otg_hcd_queue.c */
+extern dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *_h extern dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *_h
+extern void dwc_otg_hcd_qh_init (dwc_otg_hcd_t *_hcd, dwc_otg extern void dwc_otg_hcd_qh_init (dwc_otg_hcd_t *_hcd, dwc_otg
+extern void dwc_otg_hcd_qh_free (dwc_otg_qh_t *_qh); extern void dwc_otg_hcd_qh_free (dwc_otg_qh_t *_qh);
+extern int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *_hcd, dwc_otg_q extern int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *_hcd, dwc_otg_q
+extern void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *_hcd, dwc_o extern void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *_hcd, dwc_o
+extern void dwc_otg_hcd_qh_deactivate (dwc_otg_hcd_t *_hcd, d extern void dwc_otg_hcd_qh_deactivate (dwc_otg_hcd_t *_hcd, d
+extern int dwc_otg_hcd_qh_deferr (dwc_otg_hcd_t *_hcd, dwc_ot extern int dwc_otg_hcd_qh_deferr (dwc_otg_hcd_t *_hcd, dwc_ot
+
+/** Remove and free a QH */ /** Remove and free a QH */
+static inline void dwc_otg_hcd_qh_remove_and_free (dwc_otg_hc static inline void dwc_otg_hcd_qh_remove_and_free (dwc_otg_hc
+ dwc_otg_qh dwc_otg_qh
+{ {
+ dwc_otg_hcd_qh_remove (_hcd, _qh); dwc_otg_hcd_qh_remove (_hcd, _qh);
+ dwc_otg_hcd_qh_free (_qh); dwc_otg_hcd_qh_free (_qh);
+} }
+
+/** Allocates memory for a QH structure. /** Allocates memory for a QH structure.
+ * @return Returns the memory allocate or NULL on error. */ * @return Returns the memory allocate or NULL on error. */
+static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc (void) static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc (void)
+{ {
+ return (dwc_otg_qh_t *) kmalloc (sizeof(dwc_otg_qh_t) return (dwc_otg_qh_t *) kmalloc (sizeof(dwc_otg_qh_t)
+} }
+
+extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb
+extern void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct extern void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct
+extern int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, dwc_otg_h extern int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, dwc_otg_h
+
+/** Allocates memory for a QTD structure. /** Allocates memory for a QTD structure.
+ * @return Returns the memory allocate or NULL on error. */ * @return Returns the memory allocate or NULL on error. */
+static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc (void) static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc (void)
+{ {
+ return (dwc_otg_qtd_t *) kmalloc (sizeof(dwc_otg_qtd_ return (dwc_otg_qtd_t *) kmalloc (sizeof(dwc_otg_qtd_
+} }
+
+/** Frees the memory for a QTD structure. QTD should already /** Frees the memory for a QTD structure. QTD should already
+ * list. * list.
+ * @param[in] _qtd QTD to free.*/ * @param[in] _qtd QTD to free.*/
+static inline void dwc_otg_hcd_qtd_free (dwc_otg_qtd_t *_qtd) static inline void dwc_otg_hcd_qtd_free (dwc_otg_qtd_t *_qtd)
+{ {
+ kfree (_qtd); kfree (_qtd);
+} }
+
+/** Removes a QTD from list. /** Removes a QTD from list.
+ * @param[in] _qtd QTD to remove from list. */ * @param[in] _qtd QTD to remove from list. */
+static inline void dwc_otg_hcd_qtd_remove (dwc_otg_qtd_t *_qt static inline void dwc_otg_hcd_qtd_remove (dwc_otg_qtd_t *_qt
+{ {
+ unsigned long flags; unsigned long flags;
+ local_irq_save (flags); local_irq_save (flags);
+ list_del (&_qtd->qtd_list_entry); list_del (&_qtd->qtd_list_entry);
+ local_irq_restore (flags); local_irq_restore (flags);
+} }
+
+/** Remove and free a QTD */ /** Remove and free a QTD */
+static inline void dwc_otg_hcd_qtd_remove_and_free (dwc_otg_q static inline void dwc_otg_hcd_qtd_remove_and_free (dwc_otg_q
+{ {
+ dwc_otg_hcd_qtd_remove (_qtd); dwc_otg_hcd_qtd_remove (_qtd);
+ dwc_otg_hcd_qtd_free (_qtd); dwc_otg_hcd_qtd_free (_qtd);
+} }
+
+/** @} */ /** @} */
+
+
+/** @name Internal Functions */ /** @name Internal Functions */
+/** @{ */ /** @{ */
+dwc_otg_qh_t *dwc_urb_to_qh(struct urb *_urb); dwc_otg_qh_t *dwc_urb_to_qh(struct urb *_urb);
+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *_hcd); void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *_hcd);
+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *_hcd); void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *_hcd);
+/** @} */ /** @} */
+
+/** Gets the usb_host_endpoint associated with an URB. */ /** Gets the usb_host_endpoint associated with an URB. */
+static inline struct usb_host_endpoint *dwc_urb_to_endpoint(s static inline struct usb_host_endpoint *dwc_urb_to_endpoint(s
+{ {
+ struct usb_device *dev = _urb->dev; struct usb_device *dev = _urb->dev;
+ int ep_num = usb_pipeendpoint(_urb->pipe); int ep_num = usb_pipeendpoint(_urb->pipe);
+
+ if (usb_pipein(_urb->pipe)) if (usb_pipein(_urb->pipe))
+ return dev->ep_in[ep_num]; return dev->ep_in[ep_num];
+ else else
+ return dev->ep_out[ep_num]; return dev->ep_out[ep_num];
+} }
+
+/** /**
+ * Gets the endpoint number from a _bEndpointAddress argument * Gets the endpoint number from a _bEndpointAddress argument
+ * qualified with its direction (possible 32 endpoints per de * qualified with its direction (possible 32 endpoints per de
+ */ */
+#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) \ #define dwc_ep_addr_to_endpoint(_bEndpointAddress_) \
+ ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \ ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \
+ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4) ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4)
+
+/** Gets the QH that contains the list_head */ /** Gets the QH that contains the list_head */
+#define dwc_list_to_qh(_list_head_ptr_) (container_of(_list_h #define dwc_list_to_qh(_list_head_ptr_) (container_of(_list_h
+
+/** Gets the QTD that contains the list_head */ /** Gets the QTD that contains the list_head */
+#define dwc_list_to_qtd(_list_head_ptr_) (container_of(_list_ #define dwc_list_to_qtd(_list_head_ptr_) (container_of(_list_
+
+/** Check if QH is non-periodic */ /** Check if QH is non-periodic */
+#define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == US #define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == US
+ (_qh_ptr_->ep_type == US (_qh_ptr_->ep_type == US
+
+/** High bandwidth multiplier as encoded in highspeed endpoin /** High bandwidth multiplier as encoded in highspeed endpoin
+#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) > #define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >
+
+/** Packet size for any kind of endpoint descriptor */ /** Packet size for any kind of endpoint descriptor */
+#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x #define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x
+
+/** /**
+ * Returns true if _frame1 is less than or equal to _frame2. * Returns true if _frame1 is less than or equal to _frame2.
+ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rol * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rol
+ * frame number when the max frame number is reached. * frame number when the max frame number is reached.
+ */ */
+static inline int dwc_frame_num_le(uint16_t _frame1, uint16_t static inline int dwc_frame_num_le(uint16_t _frame1, uint16_t
+{ {
+ return ((_frame2 - _frame1) & DWC_HFNUM_MAX_FRNUM) <= return ((_frame2 - _frame1) & DWC_HFNUM_MAX_FRNUM) <=
+ (DWC_HFNUM_MAX_FRNUM >> 1); (DWC_HFNUM_MAX_FRNUM >> 1);
+} }
+
+/** /**
+ * Returns true if _frame1 is greater than _frame2. The compa * Returns true if _frame1 is greater than _frame2. The compa
+ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover
+ * number when the max frame number is reached. * number when the max frame number is reached.
+ */ */
+static inline int dwc_frame_num_gt(uint16_t _frame1, uint16_t static inline int dwc_frame_num_gt(uint16_t _frame1, uint16_t
+{ {
+ return (_frame1 != _frame2) && return (_frame1 != _frame2) &&
+ (((_frame1 - _frame2) & DWC_HFNUM_MAX_FRNUM) (((_frame1 - _frame2) & DWC_HFNUM_MAX_FRNUM)
+ (DWC_HFNUM_MAX_FRNUM >> 1)); (DWC_HFNUM_MAX_FRNUM >> 1));
+} }
+
+/** /**
+ * Increments _frame by the amount specified by _inc. The add * Increments _frame by the amount specified by _inc. The add
+ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value. * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value.
+ */ */
+static inline uint16_t dwc_frame_num_inc(uint16_t _frame, uin static inline uint16_t dwc_frame_num_inc(uint16_t _frame, uin
+{ {
+ return (_frame + _inc) & DWC_HFNUM_MAX_FRNUM; return (_frame + _inc) & DWC_HFNUM_MAX_FRNUM;
+} }
+
+static inline uint16_t dwc_full_frame_num (uint16_t _frame) static inline uint16_t dwc_full_frame_num (uint16_t _frame)
+{ {
+ return ((_frame) & DWC_HFNUM_MAX_FRNUM) >> 3; return ((_frame) & DWC_HFNUM_MAX_FRNUM) >> 3;
+} }
+
+static inline uint16_t dwc_micro_frame_num (uint16_t _frame) static inline uint16_t dwc_micro_frame_num (uint16_t _frame)
+{ {
+ return (_frame) & 0x7; return (_frame) & 0x7;
+} }
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+/** /**
+ * Macro to sample the remaining PHY clocks left in the curre * Macro to sample the remaining PHY clocks left in the curre
+ * may be used during debugging to determine the average time * may be used during debugging to determine the average time
+ * execute sections of code. There are two possible sample po * execute sections of code. There are two possible sample po
+ * "b", so the _letter argument must be one of these values. * "b", so the _letter argument must be one of these values.
+ * *
+ * To dump the average sample times, read the "hcd_frrem" sys * To dump the average sample times, read the "hcd_frrem" sys
+ * example, "cat /sys/devices/lm0/hcd_frrem". * example, "cat /sys/devices/lm0/hcd_frrem".
+ */ */
+#define dwc_sample_frrem(_hcd, _qh, _letter) \ #define dwc_sample_frrem(_hcd, _qh, _letter) \
+{ \ { \
+ hfnum_data_t hfnum; \ hfnum_data_t hfnum; \
+ dwc_otg_qtd_t *qtd; \ dwc_otg_qtd_t *qtd; \
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, q qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, q
+ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_f if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_f
+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->ho hfnum.d32 = dwc_read_reg32(&_hcd->core_if->ho
+ switch (hfnum.b.frnum & 0x7) { \ switch (hfnum.b.frnum & 0x7) { \
+ case 7: \ case 7: \
+ _hcd->hfnum_7_samples_##_letter++; \ _hcd->hfnum_7_samples_##_letter++; \
+ _hcd->hfnum_7_frrem_accum_##_letter + _hcd->hfnum_7_frrem_accum_##_letter +
+ break; \ break; \
+ case 0: \ case 0: \
+ _hcd->hfnum_0_samples_##_letter++; \ _hcd->hfnum_0_samples_##_letter++; \
+ _hcd->hfnum_0_frrem_accum_##_letter + _hcd->hfnum_0_frrem_accum_##_letter +
+ break; \ break; \
+ default: \ default: \
+ _hcd->hfnum_other_samples_##_letter++ _hcd->hfnum_other_samples_##_letter++
+ _hcd->hfnum_other_frrem_accum_##_lett _hcd->hfnum_other_frrem_accum_##_lett
+ break; \ break; \
+ } \ } \
+ } \ } \
+} }
+#else #else
+#define dwc_sample_frrem(_hcd, _qh, _letter) #define dwc_sample_frrem(_hcd, _qh, _letter)
+#endif #endif
+#endif #endif
+#endif /* DWC_DEVICE_ONLY */ #endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd_intr.c.sdiff b/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd_intr.c.sdiff
new file mode 100644
index 00000000000..990bcc6a419
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd_intr.c.sdiff
@@ -0,0 +1,1763 @@
+/* ========================================================== /* ==========================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers
+ * $Revision: #7 $ * $Revision: #7 $
+ * $Date: 2005/11/02 $ * $Date: 2005/11/02 $
+ * $Change: 553126 $ * $Change: 553126 $
+ * *
+ * Synopsys HS OTG Linux Software Driver and documentation (h * Synopsys HS OTG Linux Software Driver and documentation (h
+ * "Software") is an Unsupported proprietary work of Synopsys * "Software") is an Unsupported proprietary work of Synopsys
+ * otherwise expressly agreed to in writing between Synopsys * otherwise expressly agreed to in writing between Synopsys
+ * *
+ * The Software IS NOT an item of Licensed Software or Licens * The Software IS NOT an item of Licensed Software or Licens
+ * any End User Software License Agreement or Agreement for L * any End User Software License Agreement or Agreement for L
+ * with Synopsys or any supplement thereto. You are permitted * with Synopsys or any supplement thereto. You are permitted
+ * redistribute this Software in source and binary forms, wit * redistribute this Software in source and binary forms, wit
+ * modification, provided that redistributions of source code * modification, provided that redistributions of source code
+ * notice. You may not view, use, disclose, copy or distribut * notice. You may not view, use, disclose, copy or distribut
+ * any information contained herein except pursuant to this l * any information contained herein except pursuant to this l
+ * Synopsys. If you do not agree with this notice, including * Synopsys. If you do not agree with this notice, including
+ * below, then you are not authorized to use the Software. * below, then you are not authorized to use the Software.
+ * *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON A * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON A
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PA * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PA
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABL * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABL
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTER * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARI * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARI
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE PO * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE PO
+ * DAMAGE. * DAMAGE.
+ * ========================================================== * ==========================================================
+
+#ifndef CONFIG_DWC_DEVICE_ONLY #ifndef CONFIG_DWC_DEVICE_ONLY
+
+#include "dwc_otg_driver.h" #include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h" #include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h" #include "dwc_otg_regs.h"
+
+const int erratum_usb09_patched = 0; const int erratum_usb09_patched = 0;
+const int deferral_on = 1; const int deferral_on = 1;
+const int nak_deferral_delay = 8; | int nak_deferral_delay = 20;
+ > module_param(nak_deferral_delay, int, 0644);
+const int nyet_deferral_delay = 1; const int nyet_deferral_delay = 1;
+
+/** @file /** @file
+ * This file contains the implementation of the HCD Interrupt * This file contains the implementation of the HCD Interrupt
+ */ */
+
+/** This function handles interrupts for the HCD. */ /** This function handles interrupts for the HCD. */
+int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t * _dwc_otg_hcd) int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t * _dwc_otg_hcd)
+{ {
+ int retval = 0; int retval = 0;
+ dwc_otg_core_if_t * core_if = _dwc_otg_hcd->core_if; dwc_otg_core_if_t * core_if = _dwc_otg_hcd->core_if;
+ gintsts_data_t gintsts; gintsts_data_t gintsts;
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+ dwc_otg_core_global_regs_t * global_regs = core_if->core_ dwc_otg_core_global_regs_t * global_regs = core_if->core_
+
+#endif /* */ #endif /* */
+
+ /* Check if HOST Mode */ /* Check if HOST Mode */
+ if (dwc_otg_is_host_mode(core_if)) { if (dwc_otg_is_host_mode(core_if)) {
+ gintsts.d32 = dwc_otg_read_core_intr(core_if) gintsts.d32 = dwc_otg_read_core_intr(core_if)
+ if (!gintsts.d32) { if (!gintsts.d32) {
+ return 0; return 0;
+ } }
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+ /* Don't print debug message in the interrupt han /* Don't print debug message in the interrupt han
+#ifndef DEBUG_SOF #ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK) if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif #endif
+ DWC_DEBUGPL(DBG_HCD, "\n"); DWC_DEBUGPL(DBG_HCD, "\n");
+#endif /* */ #endif /* */
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+#ifndef DEBUG_SOF #ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK) if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif #endif
+ DWC_DEBUGPL(DBG_HCD,"DWC OTG HCD Interrup DWC_DEBUGPL(DBG_HCD,"DWC OTG HCD Interrup
+ gintsts.d32); gintsts.d32);
+#endif /* */ #endif /* */
+ if (gintsts.b.sofintr) { if (gintsts.b.sofintr) {
+ retval |= dwc_otg_hcd_handle_sof_intr retval |= dwc_otg_hcd_handle_sof_intr
+ } }
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET #ifdef CONFIG_OTG_PLB_DMA_TASKLET
+ if (!atomic_read(&release_later) && gintsts.b if (!atomic_read(&release_later) && gintsts.b
+#else #else
+ if (gintsts.b.rxstsqlvl) { if (gintsts.b.rxstsqlvl) {
+#endif #endif
+ retval |= dwc_otg_hcd_handle_rx_statu retval |= dwc_otg_hcd_handle_rx_statu
+ } }
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET #ifdef CONFIG_OTG_PLB_DMA_TASKLET
+ if (!atomic_read(&release_later) && gintsts.b if (!atomic_read(&release_later) && gintsts.b
+#else #else
+ if (gintsts.b.nptxfempty) { if (gintsts.b.nptxfempty) {
+#endif #endif
+ retval |= dwc_otg_hcd_handle_np_tx_fi retval |= dwc_otg_hcd_handle_np_tx_fi
+ } }
+ if (gintsts.b.i2cintr) { if (gintsts.b.i2cintr) {
+ /** @todo Implement i2cintr handler. /** @todo Implement i2cintr handler.
+ } }
+ if (gintsts.b.portintr) { if (gintsts.b.portintr) {
+ retval |= dwc_otg_hcd_handle_port_int retval |= dwc_otg_hcd_handle_port_int
+ } }
+ if (gintsts.b.hcintr) { if (gintsts.b.hcintr) {
+ retval |= dwc_otg_hcd_handle_hc_intr( retval |= dwc_otg_hcd_handle_hc_intr(
+ } }
+ if (gintsts.b.ptxfempty) { if (gintsts.b.ptxfempty) {
+ retval |= dwc_otg_hcd_handle_perio_tx retval |= dwc_otg_hcd_handle_perio_tx
+ } }
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+#ifndef DEBUG_SOF #ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK) if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif #endif
+ { {
+ DWC_DEBUGPL(DBG_HCD, DWC_DEBUGPL(DBG_HCD,
+ "DWC OTG HCD Finished Se "DWC OTG HCD Finished Se
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gi DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gi
+ dwc_read_reg32(&global_r dwc_read_reg32(&global_r
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gi DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gi
+ dwc_read_reg32(&global_r dwc_read_reg32(&global_r
+ } }
+#endif /* */ #endif /* */
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+#ifndef DEBUG_SOF #ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK) if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif #endif
+ DWC_DEBUGPL(DBG_HCD, "\n"); DWC_DEBUGPL(DBG_HCD, "\n");
+#endif /* */ #endif /* */
+ } }
+ return retval; return retval;
+} }
+
+
+#ifdef DWC_TRACK_MISSED_SOFS #ifdef DWC_TRACK_MISSED_SOFS
+#warning Compiling code to track missed SOFs #warning Compiling code to track missed SOFs
+#define FRAME_NUM_ARRAY_SIZE 1000 #define FRAME_NUM_ARRAY_SIZE 1000
+/** /**
+ * This function is for debug only. * This function is for debug only.
+ */ */
+static inline void track_missed_sofs(uint16_t _curr_frame_num static inline void track_missed_sofs(uint16_t _curr_frame_num
+{ {
+ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE] static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE]
+ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_
+ static int frame_num_idx = 0; static int frame_num_idx = 0;
+ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM; static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM;
+ static int dumped_frame_num_array = 0; static int dumped_frame_num_array = 0;
+ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) { if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
+ if ((((last_frame_num + 1) & DWC_HFNUM_MAX_FR if ((((last_frame_num + 1) & DWC_HFNUM_MAX_FR
+ _curr_frame_number)) { _curr_frame_number)) {
+ frame_num_array[frame_num_idx] = _cur frame_num_array[frame_num_idx] = _cur
+ last_frame_num_array[frame_num_idx++] last_frame_num_array[frame_num_idx++]
+ } }
+ } else if (!dumped_frame_num_array) { } else if (!dumped_frame_num_array) {
+ int i; int i;
+ printk(KERN_EMERG USB_DWC "Frame Last Fra printk(KERN_EMERG USB_DWC "Frame Last Fra
+ printk(KERN_EMERG USB_DWC "----- -------- printk(KERN_EMERG USB_DWC "----- --------
+ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) { for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
+ printk(KERN_EMERG USB_DWC "0x%04x printk(KERN_EMERG USB_DWC "0x%04x
+ frame_num_array[i], last_fram frame_num_array[i], last_fram
+ } }
+ dumped_frame_num_array = 1; dumped_frame_num_array = 1;
+ } }
+ last_frame_num = _curr_frame_number; last_frame_num = _curr_frame_number;
+} }
+#endif /* */ #endif /* */
+
+/** /**
+ * Handles the start-of-frame interrupt in host mode. Non-per * Handles the start-of-frame interrupt in host mode. Non-per
+ * transactions may be queued to the DWC_otg controller for t * transactions may be queued to the DWC_otg controller for t
+ * (micro)frame. Periodic transactions may be queued to the c * (micro)frame. Periodic transactions may be queued to the c
+ * next (micro)frame. * next (micro)frame.
+ */ */
+int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t * _hcd) int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t * _hcd)
+{ {
+ hfnum_data_t hfnum; hfnum_data_t hfnum;
+ struct list_head *qh_entry; struct list_head *qh_entry;
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+ dwc_otg_transaction_type_e tr_type; dwc_otg_transaction_type_e tr_type;
+ gintsts_data_t gintsts = {.d32 = 0}; gintsts_data_t gintsts = {.d32 = 0};
+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->h hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->h
+
+#ifdef DEBUG_SOF #ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n"); DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
+#endif /* */ #endif /* */
+ _hcd->frame_number = hfnum.b.frnum; _hcd->frame_number = hfnum.b.frnum;
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+ _hcd->frrem_accum += hfnum.b.frrem; _hcd->frrem_accum += hfnum.b.frrem;
+ _hcd->frrem_samples++; _hcd->frrem_samples++;
+#endif /* */ #endif /* */
+
+#ifdef DWC_TRACK_MISSED_SOFS #ifdef DWC_TRACK_MISSED_SOFS
+ track_missed_sofs(_hcd->frame_number); track_missed_sofs(_hcd->frame_number);
+#endif /* */ #endif /* */
+
+ /* Determine whether any periodic QHs should be executed. /* Determine whether any periodic QHs should be executed.
+ qh_entry = _hcd->periodic_sched_inactive.next; qh_entry = _hcd->periodic_sched_inactive.next;
+ while (qh_entry != &_hcd->periodic_sched_inactive) { while (qh_entry != &_hcd->periodic_sched_inactive) {
+ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_li qh = list_entry(qh_entry, dwc_otg_qh_t, qh_li
+ qh_entry = qh_entry->next; qh_entry = qh_entry->next;
+ if (dwc_frame_num_le(qh->sched_frame, _hcd->f if (dwc_frame_num_le(qh->sched_frame, _hcd->f
+ /* /*
+ * Move QH to the ready list to be execut * Move QH to the ready list to be execut
+ * (micro)frame. * (micro)frame.
+ */ */
+ list_move(&qh->qh_list_entry,&_hcd->perio list_move(&qh->qh_list_entry,&_hcd->perio
+ } }
+ } }
+ tr_type = dwc_otg_hcd_select_transactions(_hcd); tr_type = dwc_otg_hcd_select_transactions(_hcd);
+ if (tr_type != DWC_OTG_TRANSACTION_NONE) { if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+ dwc_otg_hcd_queue_transactions(_hcd, tr_type) dwc_otg_hcd_queue_transactions(_hcd, tr_type)
+ //schedule_work(&_hcd->hcd_queue_work); //schedule_work(&_hcd->hcd_queue_work);
+ } }
+
+ /* Clear interrupt */ /* Clear interrupt */
+ gintsts.b.sofintr = 1; gintsts.b.sofintr = 1;
+ dwc_write_reg32(&_hcd->core_if->core_global_regs->gin dwc_write_reg32(&_hcd->core_if->core_global_regs->gin
+ return 1; return 1;
+} }
+
+/** Handles the Rx Status Queue Level Interrupt, which indica /** Handles the Rx Status Queue Level Interrupt, which indica
+ * least one packet in the Rx FIFO. The packets are moved fr * least one packet in the Rx FIFO. The packets are moved fr
+ * memory if the DWC_otg controller is operating in Slave mod * memory if the DWC_otg controller is operating in Slave mod
+int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd
+{ {
+ host_grxsts_data_t grxsts; host_grxsts_data_t grxsts;
+ dwc_hc_t * hc = NULL; dwc_hc_t * hc = NULL;
+ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n"); DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
+ grxsts.d32 = dwc_read_reg32(&_dwc_otg_hcd->core_if->c grxsts.d32 = dwc_read_reg32(&_dwc_otg_hcd->core_if->c
+ hc = _dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum]; hc = _dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
+
+ /* Packet Status */ /* Packet Status */
+ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum
+ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bc DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bc
+ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n" DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n"
+ hc->data_pid_start); hc->data_pid_start);
+ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b. DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.
+ switch (grxsts.b.pktsts) { switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN: case DWC_GRXSTS_PKTSTS_IN:
+ /* Read the data into the host buffer. */ /* Read the data into the host buffer. */
+ if (grxsts.b.bcnt > 0) { if (grxsts.b.bcnt > 0) {
+ dwc_otg_read_packet(_dwc_otg_hcd->cor dwc_otg_read_packet(_dwc_otg_hcd->cor
+ hc->xfer_buff, g hc->xfer_buff, g
+
+ /* Update the HC fields for the next pack /* Update the HC fields for the next pack
+ hc->xfer_count += grxsts.b.bcnt; hc->xfer_count += grxsts.b.bcnt;
+ hc->xfer_buff += grxsts.b.bcnt; hc->xfer_buff += grxsts.b.bcnt;
+ } }
+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR: case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
+ case DWC_GRXSTS_PKTSTS_CH_HALTED: case DWC_GRXSTS_PKTSTS_CH_HALTED:
+ /* Handled in interrupt, just ignore data /* Handled in interrupt, just ignore data
+ break; break;
+ default: default:
+ DWC_ERROR("RX_STS_Q Interrupt: Unknown status DWC_ERROR("RX_STS_Q Interrupt: Unknown status
+ grxsts.b.pktsts); grxsts.b.pktsts);
+ break; break;
+ } }
+ return 1; return 1;
+} }
+
+
+/** This interrupt occurs when the non-periodic Tx FIFO is ha /** This interrupt occurs when the non-periodic Tx FIFO is ha
+ * data packets may be written to the FIFO for OUT transfers. * data packets may be written to the FIFO for OUT transfers.
+ * may be written to the non-periodic request queue for IN tr * may be written to the non-periodic request queue for IN tr
+ * interrupt is enabled only in Slave mode. */ * interrupt is enabled only in Slave mode. */
+int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_ int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_
+ _dwc_otg _dwc_otg
+{ {
+ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Int DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Int
+ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd, dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
+ DWC_OTG_TRANSACTION_N DWC_OTG_TRANSACTION_N
+ //schedule_work(&_dwc_otg_hcd->hcd_queue_work); //schedule_work(&_dwc_otg_hcd->hcd_queue_work);
+ return 1; return 1;
+} }
+
+
+/** This interrupt occurs when the periodic Tx FIFO is half-e /** This interrupt occurs when the periodic Tx FIFO is half-e
+ * packets may be written to the FIFO for OUT transfers. More * packets may be written to the FIFO for OUT transfers. More
+ * written to the periodic request queue for IN transfers. Th * written to the periodic request queue for IN transfers. Th
+ * enabled only in Slave mode. */ * enabled only in Slave mode. */
+int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_h int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_h
+ _dwc_ _dwc_
+{ {
+ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interru DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interru
+ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd, dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
+ DWC_OTG_TRANSACTION_P DWC_OTG_TRANSACTION_P
+ //schedule_work(&_dwc_otg_hcd->hcd_queue_work); //schedule_work(&_dwc_otg_hcd->hcd_queue_work);
+ return 1; return 1;
+} }
+
+
+/** There are multiple conditions that can cause a port inter /** There are multiple conditions that can cause a port inter
+ * determines which interrupt conditions have occurred and ha * determines which interrupt conditions have occurred and ha
+ * appropriately. */ * appropriately. */
+int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t * _dwc_otg int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t * _dwc_otg
+{ {
+ int retval = 0; int retval = 0;
+ hprt0_data_t hprt0; hprt0_data_t hprt0;
+ hprt0_data_t hprt0_modify; hprt0_data_t hprt0_modify;
+ hprt0.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->hos hprt0.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->hos
+ hprt0_modify.d32 = dwc_read_reg32(_dwc_otg_hcd->core_ hprt0_modify.d32 = dwc_read_reg32(_dwc_otg_hcd->core_
+
+ /* Clear appropriate bits in HPRT0 to clear the interrupt /* Clear appropriate bits in HPRT0 to clear the interrupt
+ * GINTSTS */ * GINTSTS */
+ hprt0_modify.b.prtena = 0; hprt0_modify.b.prtena = 0;
+ hprt0_modify.b.prtconndet = 0; hprt0_modify.b.prtconndet = 0;
+ hprt0_modify.b.prtenchng = 0; hprt0_modify.b.prtenchng = 0;
+ hprt0_modify.b.prtovrcurrchng = 0; hprt0_modify.b.prtovrcurrchng = 0;
+
+ /* Port Connect Detected /* Port Connect Detected
+ * Set flag and clear if detected */ * Set flag and clear if detected */
+ if (hprt0.b.prtconndet) { if (hprt0.b.prtconndet) {
+ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0= DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=
+ "Port Connect Detected--\n", hpr "Port Connect Detected--\n", hpr
+ _dwc_otg_hcd->flags.b.port_connect_status_cha _dwc_otg_hcd->flags.b.port_connect_status_cha
+ _dwc_otg_hcd->flags.b.port_connect_status = 1 _dwc_otg_hcd->flags.b.port_connect_status = 1
+ hprt0_modify.b.prtconndet = 1; hprt0_modify.b.prtconndet = 1;
+
+ /* B-Device has connected, Delete the connection /* B-Device has connected, Delete the connection
+ del_timer(&_dwc_otg_hcd->conn_timer); del_timer(&_dwc_otg_hcd->conn_timer);
+
+ /* The Hub driver asserts a reset when it sees po /* The Hub driver asserts a reset when it sees po
+ * status change flag * status change flag
+ */ */
+ retval |= 1; retval |= 1;
+ } }
+
+ /* Port Enable Changed /* Port Enable Changed
+ * Clear if detected - Set internal flag if disabled */ * Clear if detected - Set internal flag if disabled */
+ if (hprt0.b.prtenchng) { if (hprt0.b.prtenchng) {
+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT
+ "Port Enable Changed--\n", hprt0 "Port Enable Changed--\n", hprt0
+ hprt0_modify.b.prtenchng = 1; hprt0_modify.b.prtenchng = 1;
+ if (hprt0.b.prtena == 1) { if (hprt0.b.prtena == 1) {
+ int do_reset = 0; int do_reset = 0;
+ dwc_otg_core_params_t * params = dwc_otg_core_params_t * params =
+ _dwc_otg_hcd->core_if->core_param _dwc_otg_hcd->core_if->core_param
+ dwc_otg_core_global_regs_t * global_r dwc_otg_core_global_regs_t * global_r
+ _dwc_otg_hcd->core_if->core_globa _dwc_otg_hcd->core_if->core_globa
+ dwc_otg_host_if_t * host_if = dwc_otg_host_if_t * host_if =
+ _dwc_otg_hcd->core_if->host_if; _dwc_otg_hcd->core_if->host_if;
+
+ /* Check if we need to adjust the PHY clo /* Check if we need to adjust the PHY clo
+ * low power and adjust it */ * low power and adjust it */
+ if (params->host_support_fs_ls_low_power) if (params->host_support_fs_ls_low_power)
+ gusbcfg_data_t usbcfg; gusbcfg_data_t usbcfg;
+ usbcfg.d32 = dwc_read_reg32(& usbcfg.d32 = dwc_read_reg32(&
+ if ((hprt0.b.prtspd == DWC_HP if ((hprt0.b.prtspd == DWC_HP
+ (hprt0.b.prtspd == DW (hprt0.b.prtspd == DW
+ /* /*
+ * Low power * Low power
+ */ */
+ hcfg_data_t hcfg; hcfg_data_t hcfg;
+ if (usbcfg.b.phylpwrc if (usbcfg.b.phylpwrc
+ /* Set PHY low po /* Set PHY low po
+ usbcfg.b.phylpwrc usbcfg.b.phylpwrc
+ dwc_write_reg dwc_write_reg
+ do_reset = 1; do_reset = 1;
+ } }
+ hcfg.d32 = dwc_read_r hcfg.d32 = dwc_read_r
+ if ((hprt0.b.prtspd = if ((hprt0.b.prtspd =
+ (params->host (params->host
+ DWC_HOST_LS_L DWC_HOST_LS_L
+ /* 6 MHZ */ /* 6 MHZ */
+ DWC_DEBUGPL(DBG_C DWC_DEBUGPL(DBG_C
+ if (hcfg.b.fs if (hcfg.b.fs
+ hcfg. hcfg.
+ dwc_w dwc_w
+ do_re do_re
+ } }
+ } else { } else {
+ /* 48 MHZ */ /* 48 MHZ */
+ DWC_DEBUGPL(DBG_C DWC_DEBUGPL(DBG_C
+ if (hcfg.b.fs if (hcfg.b.fs
+ hcfg. hcfg.
+ dwc_w dwc_w
+ do_re do_re
+ } }
+ } }
+ } else { } else {
+ /* /*
+ * Not low power * Not low power
+ */ */
+ if (usbcfg.b.phylpwrclkse if (usbcfg.b.phylpwrclkse
+ usbcfg.b.phyl usbcfg.b.phyl
+ dwc_write_reg dwc_write_reg
+ do_reset = 1; do_reset = 1;
+ } }
+ } }
+ if (do_reset) { if (do_reset) {
+ tasklet_schedule(_dwc tasklet_schedule(_dwc
+ } }
+ } }
+ if (!do_reset) { if (!do_reset) {
+ /* Port has been enabled set the /* Port has been enabled set the
+ _dwc_otg_hcd->flags.b.port_reset_ _dwc_otg_hcd->flags.b.port_reset_
+ } }
+ } else { } else {
+ _dwc_otg_hcd->flags.b.port_enable_cha _dwc_otg_hcd->flags.b.port_enable_cha
+ } }
+ retval |= 1; retval |= 1;
+ } }
+
+ /** Overcurrent Change Interrupt */ /** Overcurrent Change Interrupt */
+ if (hprt0.b.prtovrcurrchng) { if (hprt0.b.prtovrcurrchng) {
+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT
+ "Port Overcurrent Changed--\n", "Port Overcurrent Changed--\n",
+ _dwc_otg_hcd->flags.b.port_over_current_chang _dwc_otg_hcd->flags.b.port_over_current_chang
+ hprt0_modify.b.prtovrcurrchng = 1; hprt0_modify.b.prtovrcurrchng = 1;
+ retval |= 1; retval |= 1;
+ } }
+
+ /* Clear Port Interrupts */ /* Clear Port Interrupts */
+ dwc_write_reg32(_dwc_otg_hcd->core_if->host_if->hprt0,hpr dwc_write_reg32(_dwc_otg_hcd->core_if->host_if->hprt0,hpr
+ return retval; return retval;
+} }
+
+/** This interrupt indicates that one or more host channels h /** This interrupt indicates that one or more host channels h
+ * interrupt. There are multiple conditions that can cause ea * interrupt. There are multiple conditions that can cause ea
+ * interrupt. This function determines which conditions have * interrupt. This function determines which conditions have
+ * host channel interrupt and handles them appropriately. */ * host channel interrupt and handles them appropriately. */
+int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t * _dwc_otg_h int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t * _dwc_otg_h
+{ {
+ int i; int i;
+ int retval = 0; int retval = 0;
+ haint_data_t haint; haint_data_t haint;
+
+ /* Clear appropriate bits in HCINTn to clear the interrup /* Clear appropriate bits in HCINTn to clear the interrup
+ * GINTSTS */ * GINTSTS */
+ haint.d32 = dwc_otg_read_host_all_channels_intr(_dwc_otg_ haint.d32 = dwc_otg_read_host_all_channels_intr(_dwc_otg_
+ for (i = 0; i < _dwc_otg_hcd->core_if->core_params->h for (i = 0; i < _dwc_otg_hcd->core_if->core_params->h
+ if (haint.b2.chint & (1 << i)) { if (haint.b2.chint & (1 << i)) {
+ retval |= dwc_otg_hcd_handle_hc_n_int retval |= dwc_otg_hcd_handle_hc_n_int
+ } }
+ } }
+ return retval; return retval;
+} }
+
+/* Macro used to clear one channel interrupt */ /* Macro used to clear one channel interrupt */
+#define clear_hc_int(_hc_regs_,_intr_) \ #define clear_hc_int(_hc_regs_,_intr_) \
+ do { \ do { \
+ hcint_data_t hcint_clear = { .d32 = 0}; \ hcint_data_t hcint_clear = { .d32 = 0}; \
+ hcint_clear.b._intr_ = 1; \ hcint_clear.b._intr_ = 1; \
+ dwc_write_reg32(&((_hc_regs_)->hcint), hcint_ dwc_write_reg32(&((_hc_regs_)->hcint), hcint_
+ } while (0) } while (0)
+
+/* /*
+ * Macro used to disable one channel interrupt. Channel inter * Macro used to disable one channel interrupt. Channel inter
+ * disabled when the channel is halted or released by the int * disabled when the channel is halted or released by the int
+ * There is no need to handle further interrupts of that type * There is no need to handle further interrupts of that type
+ * channel is re-assigned. In fact, subsequent handling may c * channel is re-assigned. In fact, subsequent handling may c
+ * because the channel structures are cleaned up when the cha * because the channel structures are cleaned up when the cha
+ */ */
+#define disable_hc_int(_hc_regs_,_intr_) \ #define disable_hc_int(_hc_regs_,_intr_) \
+ do { \ do { \
+ hcintmsk_data_t hcintmsk = {.d32 = 0}; \ hcintmsk_data_t hcintmsk = {.d32 = 0}; \
+ hcintmsk.b._intr_ = 1; \ hcintmsk.b._intr_ = 1; \
+ dwc_modify_reg32(&((_hc_regs_)->hcintmsk), hc dwc_modify_reg32(&((_hc_regs_)->hcintmsk), hc
+ } while (0) } while (0)
+
+/** /**
+ * Gets the actual length of a transfer after the transfer ha * Gets the actual length of a transfer after the transfer ha
+ * holds the reason for the halt. * holds the reason for the halt.
+ * *
+ * For IN transfers where _halt_status is DWC_OTG_HC_XFER_COM * For IN transfers where _halt_status is DWC_OTG_HC_XFER_COM
+ * *_short_read is set to 1 upon return if less than the requ * *_short_read is set to 1 upon return if less than the requ
+ * number of bytes were transferred. Otherwise, *_short_read * number of bytes were transferred. Otherwise, *_short_read
+ * return. _short_read may also be NULL on entry, in which ca * return. _short_read may also be NULL on entry, in which ca
+ * unchanged. * unchanged.
+ */ */
+static uint32_t get_actual_xfer_length(dwc_hc_t * _hc, static uint32_t get_actual_xfer_length(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status, int *_short_read) dwc_otg_halt_status_e _halt_status, int *_short_read)
+{ {
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ uint32_t length; uint32_t length;
+ if (_short_read != NULL) { if (_short_read != NULL) {
+ *_short_read = 0; *_short_read = 0;
+ } }
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (_halt_status == DWC_OTG_HC_XFER_COMPLETE) { if (_halt_status == DWC_OTG_HC_XFER_COMPLETE) {
+ if (_hc->ep_is_in) { if (_hc->ep_is_in) {
+ length = _hc->xfer_len - hctsiz.b.xfe length = _hc->xfer_len - hctsiz.b.xfe
+ if (_short_read != NULL) { if (_short_read != NULL) {
+ *_short_read = (hctsiz.b.xfer *_short_read = (hctsiz.b.xfer
+ } }
+ } else if (_hc->qh->do_split) { } else if (_hc->qh->do_split) {
+ length = _qtd->ssplit_out_xfer_count; length = _qtd->ssplit_out_xfer_count;
+ } else { } else {
+ length = _hc->xfer_len; length = _hc->xfer_len;
+ } }
+ } else { } else {
+ /* /*
+ * Must use the hctsiz.pktcnt field to determine * Must use the hctsiz.pktcnt field to determine
+ * has been transferred. This field reflects the * has been transferred. This field reflects the
+ * packets that have been transferred via the USB * packets that have been transferred via the USB
+ * always an integral number of packets if the tr * always an integral number of packets if the tr
+ * halted before its normal completion. (Can't us * halted before its normal completion. (Can't us
+ * hctsiz.xfersize field because that reflects th * hctsiz.xfersize field because that reflects th
+ * bytes transferred via the AHB, not the USB). * bytes transferred via the AHB, not the USB).
+ */ */
+ length = (_hc->start_pkt_count - hctsiz.b.pktcnt) length = (_hc->start_pkt_count - hctsiz.b.pktcnt)
+ } }
+ return length; return length;
+} }
+
+/** /**
+ * Updates the state of the URB after a Transfer Complete int * Updates the state of the URB after a Transfer Complete int
+ * host channel. Updates the actual_length field of the URB b * host channel. Updates the actual_length field of the URB b
+ * number of bytes transferred via the host channel. Sets the * number of bytes transferred via the host channel. Sets the
+ * if the data transfer is finished. * if the data transfer is finished.
+ * *
+ * @return 1 if the data transfer specified by the URB is com * @return 1 if the data transfer specified by the URB is com
+ * 0 otherwise. * 0 otherwise.
+ */ */
+static int update_urb_state_xfer_comp(dwc_hc_t * _hc, static int update_urb_state_xfer_comp(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc dwc_otg_hc_regs_t * _hc
+ dwc_otg_qtd_t * _qtd, i dwc_otg_qtd_t * _qtd, i
+{ {
+ int xfer_done = 0; int xfer_done = 0;
+ int short_read = 0; int short_read = 0;
+ _urb->actual_length += get_actual_xfer_length(_hc, _h _urb->actual_length += get_actual_xfer_length(_hc, _h
+ DWC_OTG_HC_XFER_COMPLETE, DWC_OTG_HC_XFER_COMPLETE,
+ if (short_read || (_urb->actual_length == _urb->trans if (short_read || (_urb->actual_length == _urb->trans
+ xfer_done = 1; xfer_done = 1;
+ if (_urb->actual_length == _urb->transfer_buf | if (short_read && (_urb->transfer_flags & URB
+ *status = 0; <
+ } else if (short_read && (_urb->transfer_flag <
+ *status = -EREMOTEIO; *status = -EREMOTEIO;
+ } else { } else {
+ *status = 0; *status = 0;
+ } }
+ } }
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+ { {
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz
+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, chann DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, chann
+ __func__, (_hc->ep_is_in ? "IN" __func__, (_hc->ep_is_in ? "IN"
+ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n",
+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n
+ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer
+ _urb->transfer_buffer_length); _urb->transfer_buffer_length);
+ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length % DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %
+ _urb->actual_length); _urb->actual_length);
+ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_
+ short_read, xfer_done); short_read, xfer_done);
+ } }
+#endif /* */ #endif /* */
+ return xfer_done; return xfer_done;
+} }
+
+/* /*
+ * Save the starting data toggle for the next transfer. The d * Save the starting data toggle for the next transfer. The d
+ * saved in the QH for non-control transfers and it's saved i * saved in the QH for non-control transfers and it's saved i
+ * control transfers. * control transfers.
+ */ */
+static void save_data_toggle(dwc_hc_t * _hc, static void save_data_toggle(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd) dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd)
+{ {
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) { if (_hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
+ dwc_otg_qh_t * qh = _hc->qh; dwc_otg_qh_t * qh = _hc->qh;
+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
+ qh->data_toggle = DWC_OTG_HC_PID_DATA qh->data_toggle = DWC_OTG_HC_PID_DATA
+ } else { } else {
+ qh->data_toggle = DWC_OTG_HC_PID_DATA qh->data_toggle = DWC_OTG_HC_PID_DATA
+ } }
+ } else { } else {
+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
+ _qtd->data_toggle = DWC_OTG_HC_PID_DA _qtd->data_toggle = DWC_OTG_HC_PID_DA
+ } else { } else {
+ _qtd->data_toggle = DWC_OTG_HC_PID_DA _qtd->data_toggle = DWC_OTG_HC_PID_DA
+ } }
+ } }
+} }
+
+/** /**
+ * Frees the first QTD in the QH's list if free_qtd is 1. For * Frees the first QTD in the QH's list if free_qtd is 1. For
+ * QHs, removes the QH from the active non-periodic schedule. * QHs, removes the QH from the active non-periodic schedule.
+ * still linked to the QH, the QH is added to the end of the * still linked to the QH, the QH is added to the end of the
+ * non-periodic schedule. For periodic QHs, removes the QH fr * non-periodic schedule. For periodic QHs, removes the QH fr
+ * schedule if no more QTDs are linked to the QH. * schedule if no more QTDs are linked to the QH.
+ */ */
+static void deactivate_qh(dwc_otg_hcd_t * _hcd, static void deactivate_qh(dwc_otg_hcd_t * _hcd,
+ dwc_otg_qh_t * _qh, int free_qtd) dwc_otg_qh_t * _qh, int free_qtd)
+{ {
+ int continue_split = 0; int continue_split = 0;
+ dwc_otg_qtd_t * qtd; dwc_otg_qtd_t * qtd;
+ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, _ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, _
+ free_qtd); free_qtd);
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, q qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, q
+ if (qtd->complete_split) { if (qtd->complete_split) {
+ continue_split = 1; continue_split = 1;
+ } else if ((qtd->isoc_split_pos == DWC_HCSPLIT_XACTPO } else if ((qtd->isoc_split_pos == DWC_HCSPLIT_XACTPO
+ (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_E (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_E
+ continue_split = 1; continue_split = 1;
+ } }
+
+ if (free_qtd) { if (free_qtd) {
+ /* /*
+ * Note that this was previously a call to * Note that this was previously a call to
+ * dwc_otg_hcd_qtd_remove_and_free(qtd), whic * dwc_otg_hcd_qtd_remove_and_free(qtd), whic
+ * However, that call frees the qtd memory, a * However, that call frees the qtd memory, a
+ * interrupt logic to access it many more tim * interrupt logic to access it many more tim
+ * to it. With slub debugging on, it is clea * to it. With slub debugging on, it is clea
+ * to memory we had freed. * to memory we had freed.
+ * Call this instead, and now I have moved th * Call this instead, and now I have moved th
+ * the end of processing this interrupt. * the end of processing this interrupt.
+ */ */
+ dwc_otg_hcd_qtd_remove(qtd); dwc_otg_hcd_qtd_remove(qtd);
+
+ continue_split = 0; continue_split = 0;
+ } }
+ _qh->channel = NULL; _qh->channel = NULL;
+ _qh->qtd_in_process = NULL; _qh->qtd_in_process = NULL;
+ dwc_otg_hcd_qh_deactivate(_hcd, _qh, continue_split); dwc_otg_hcd_qh_deactivate(_hcd, _qh, continue_split);
+} }
+
+/** /**
+ * Updates the state of an Isochronous URB when the transfer * Updates the state of an Isochronous URB when the transfer
+ * any reason. The fields of the current entry in the frame d * any reason. The fields of the current entry in the frame d
+ * are set based on the transfer state and the input _halt_st * are set based on the transfer state and the input _halt_st
+ * the Isochronous URB if all the URB frames have been comple * the Isochronous URB if all the URB frames have been comple
+ * *
+ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames
+ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_U * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_U
+ */ */
+static dwc_otg_halt_status_e update_isoc_urb_state(dwc_otg_hc static dwc_otg_halt_status_e update_isoc_urb_state(dwc_otg_hc
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_o dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_o
+ dwc_otg_halt_status_e _halt_status) dwc_otg_halt_status_e _halt_status)
+{ {
+ struct urb *urb = _qtd->urb; struct urb *urb = _qtd->urb;
+ dwc_otg_halt_status_e ret_val = _halt_status; dwc_otg_halt_status_e ret_val = _halt_status;
+ struct usb_iso_packet_descriptor *frame_desc; struct usb_iso_packet_descriptor *frame_desc;
+ frame_desc = &urb->iso_frame_desc[_qtd->isoc_frame_in frame_desc = &urb->iso_frame_desc[_qtd->isoc_frame_in
+ switch (_halt_status) { switch (_halt_status) {
+ case DWC_OTG_HC_XFER_COMPLETE: case DWC_OTG_HC_XFER_COMPLETE:
+ frame_desc->status = 0; frame_desc->status = 0;
+ frame_desc->actual_length = frame_desc->actual_length =
+ get_actual_xfer_length(_hc, _hc_regs, get_actual_xfer_length(_hc, _hc_regs,
+ break; break;
+ case DWC_OTG_HC_XFER_FRAME_OVERRUN: case DWC_OTG_HC_XFER_FRAME_OVERRUN:
+ urb->error_count++; urb->error_count++;
+ if (_hc->ep_is_in) { if (_hc->ep_is_in) {
+ frame_desc->status = -ENOSR; frame_desc->status = -ENOSR;
+ } else { } else {
+ frame_desc->status = -ECOMM; frame_desc->status = -ECOMM;
+ } }
+ frame_desc->actual_length = 0; frame_desc->actual_length = 0;
+ break; break;
+ case DWC_OTG_HC_XFER_BABBLE_ERR: case DWC_OTG_HC_XFER_BABBLE_ERR:
+ urb->error_count++; urb->error_count++;
+ frame_desc->status = -EOVERFLOW; frame_desc->status = -EOVERFLOW;
+
+ /* Don't need to update actual_length in this cas /* Don't need to update actual_length in this cas
+ break; break;
+ case DWC_OTG_HC_XFER_XACT_ERR: case DWC_OTG_HC_XFER_XACT_ERR:
+ urb->error_count++; urb->error_count++;
+ frame_desc->status = -EPROTO; frame_desc->status = -EPROTO;
+ frame_desc->actual_length = frame_desc->actual_length =
+ get_actual_xfer_length(_hc, _hc_regs, _qt get_actual_xfer_length(_hc, _hc_regs, _qt
+ default: default:
+ DWC_ERROR("%s: Unhandled _halt_status (%d)\n" DWC_ERROR("%s: Unhandled _halt_status (%d)\n"
+ BUG(); BUG();
+ break; break;
+ } }
+ if (++_qtd->isoc_frame_index == urb->number_of_packet if (++_qtd->isoc_frame_index == urb->number_of_packet
+ /* /*
+ * urb->status is not used for isoc transfers. * urb->status is not used for isoc transfers.
+ * The individual frame_desc statuses are used in * The individual frame_desc statuses are used in
+ */ */
+ dwc_otg_hcd_complete_urb(_hcd, urb, 0); dwc_otg_hcd_complete_urb(_hcd, urb, 0);
+ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE; ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
+ } else { } else {
+ ret_val = DWC_OTG_HC_XFER_COMPLETE; ret_val = DWC_OTG_HC_XFER_COMPLETE;
+ } }
+ return ret_val; return ret_val;
+} }
+
+/** /**
+ * Releases a host channel for use by other transfers. Attemp * Releases a host channel for use by other transfers. Attemp
+ * queue more transactions since at least one host channel is * queue more transactions since at least one host channel is
+ * *
+ * @param _hcd The HCD state structure. * @param _hcd The HCD state structure.
+ * @param _hc The host channel to release. * @param _hc The host channel to release.
+ * @param _qtd The QTD associated with the host channel. This * @param _qtd The QTD associated with the host channel. This
+ * if the transfer is complete or an error has occurred. * if the transfer is complete or an error has occurred.
+ * @param _halt_status Reason the channel is being released. * @param _halt_status Reason the channel is being released.
+ * determines the actions taken by this function. * determines the actions taken by this function.
+ */ */
+
+static void release_channel(dwc_otg_hcd_t * _hcd, static void release_channel(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_qtd_t * _qtd, dwc_otg_halt_status dwc_hc_t * _hc, dwc_otg_qtd_t * _qtd, dwc_otg_halt_status
+ dwc_otg_transaction_type_e tr_type; dwc_otg_transaction_type_e tr_type;
+ int free_qtd; int free_qtd;
+ dwc_otg_qh_t * _qh; dwc_otg_qh_t * _qh;
+ int deact = 1; int deact = 1;
+ int retry_delay = 1; int retry_delay = 1;
+ unsigned long flags; <
+
+ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status
+ _hc->hc_num, _halt_status); _hc->hc_num, _halt_status);
+ switch (_halt_status) { switch (_halt_status) {
+ case DWC_OTG_HC_XFER_NYET: case DWC_OTG_HC_XFER_NYET:
+ case DWC_OTG_HC_XFER_NAK: case DWC_OTG_HC_XFER_NAK:
+ if (_halt_status == DWC_OTG_HC_XFER_NYET) { if (_halt_status == DWC_OTG_HC_XFER_NYET) {
+ retry_delay = nyet_deferral_delay; retry_delay = nyet_deferral_delay;
+ } else { } else {
+ retry_delay = nak_deferral_delay; retry_delay = nak_deferral_delay;
+ } }
+ free_qtd = 0; free_qtd = 0;
+ if (deferral_on && _hc->do_split) { if (deferral_on && _hc->do_split) {
+ _qh = _hc->qh; _qh = _hc->qh;
+ if (_qh) { if (_qh) {
+ deact = dwc_otg_hcd_qh_deferr deact = dwc_otg_hcd_qh_deferr
+ } }
+ } }
+ break; break;
+
+ case DWC_OTG_HC_XFER_URB_COMPLETE: case DWC_OTG_HC_XFER_URB_COMPLETE:
+ free_qtd = 1; free_qtd = 1;
+ break; break;
+ case DWC_OTG_HC_XFER_AHB_ERR: case DWC_OTG_HC_XFER_AHB_ERR:
+ case DWC_OTG_HC_XFER_STALL: case DWC_OTG_HC_XFER_STALL:
+ case DWC_OTG_HC_XFER_BABBLE_ERR: case DWC_OTG_HC_XFER_BABBLE_ERR:
+ free_qtd = 1; free_qtd = 1;
+ break; break;
+ case DWC_OTG_HC_XFER_XACT_ERR: case DWC_OTG_HC_XFER_XACT_ERR:
+ if (_qtd->error_count >= 3) { if (_qtd->error_count >= 3) {
+ DWC_DEBUGPL(DBG_HCDV, " Complete URB DWC_DEBUGPL(DBG_HCDV, " Complete URB
+ free_qtd = 1; free_qtd = 1;
+ dwc_otg_hcd_complete_urb(_hcd, _qtd-> dwc_otg_hcd_complete_urb(_hcd, _qtd->
+ } else { } else {
+ free_qtd = 0; free_qtd = 0;
+ } }
+ break; break;
+ case DWC_OTG_HC_XFER_URB_DEQUEUE: case DWC_OTG_HC_XFER_URB_DEQUEUE:
+ /* /*
+ * The QTD has already been removed and the QH ha * The QTD has already been removed and the QH ha
+ * deactivated. Don't want to do anything except * deactivated. Don't want to do anything except
+ * host channel and try to queue more transfers. * host channel and try to queue more transfers.
+ */ */
+ goto cleanup; goto cleanup;
+ case DWC_OTG_HC_XFER_NO_HALT_STATUS: case DWC_OTG_HC_XFER_NO_HALT_STATUS:
+#ifdef CONFIG_DWC_DEBUG <
+ DWC_ERROR("%s: No halt_status, channel %d\n", DWC_ERROR("%s: No halt_status, channel %d\n",
+ _hc->hc_num); _hc->hc_num);
+#endif <
+ free_qtd = 0; free_qtd = 0;
+ break; break;
+ default: default:
+ free_qtd = 0; free_qtd = 0;
+ break; break;
+ } }
+ if (free_qtd) { | *must_free = free_qtd;
+ /* Only change must_free to true (do not set <
+ * pre-initialized to zero). <
+ */ <
+ *must_free = 1; <
+ } <
+ if (deact) { if (deact) {
+ deactivate_qh(_hcd, _hc->qh, free_qtd); deactivate_qh(_hcd, _hc->qh, free_qtd);
+ } }
+cleanup: cleanup:
+ /* /*
+ * Release the host channel for use by other transfers. T * Release the host channel for use by other transfers. T
+ * function clears the channel interrupt enables and cond * function clears the channel interrupt enables and cond
+ * there's no need to clear the Channel Halted interrupt * there's no need to clear the Channel Halted interrupt
+ */ */
+ dwc_otg_hc_cleanup(_hcd->core_if, _hc); dwc_otg_hc_cleanup(_hcd->core_if, _hc);
+ list_add_tail(&_hc->hc_list_entry, &_hcd->free_hc_lis list_add_tail(&_hc->hc_list_entry, &_hcd->free_hc_lis
+ local_irq_save(flags); | switch (_hc->ep_type) {
+ _hcd->available_host_channels++; | case DWC_OTG_EP_TYPE_CONTROL:
+ local_irq_restore(flags); | case DWC_OTG_EP_TYPE_BULK:
+ | _hcd->non_periodic_channels--;
+ > break;
+ > default:
+ > /*
+ > * Don't release reservations for periodic channe
+ > * That's done when a periodic transfer is desche
+ > * when the QH is removed from the periodic sched
+ > */
+ > break;
+ > }
+ /* Try to queue more transfers now that there's a fre /* Try to queue more transfers now that there's a fre
+ /* unless erratum_usb09_patched is set */ /* unless erratum_usb09_patched is set */
+ if (!erratum_usb09_patched) { if (!erratum_usb09_patched) {
+ tr_type = dwc_otg_hcd_select_transactions(_hc tr_type = dwc_otg_hcd_select_transactions(_hc
+ if (tr_type != DWC_OTG_TRANSACTION_NONE) { if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+ dwc_otg_hcd_queue_transactions(_hcd, dwc_otg_hcd_queue_transactions(_hcd,
+ } }
+ } }
+} }
+
+/** /**
+ * Halts a host channel. If the channel cannot be halted imme * Halts a host channel. If the channel cannot be halted imme
+ * the request queue is full, this function ensures that the * the request queue is full, this function ensures that the
+ * interrupt for the appropriate queue is enabled so that the * interrupt for the appropriate queue is enabled so that the
+ * be queued when there is space in the request queue. * be queued when there is space in the request queue.
+ * *
+ * This function may also be called in DMA mode. In that case * This function may also be called in DMA mode. In that case
+ * simply released since the core always halts the channel au * simply released since the core always halts the channel au
+ * DMA mode. * DMA mode.
+ */ */
+static void halt_channel(dwc_otg_hcd_t * _hcd, static void halt_channel(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_qtd_t * _qtd, dwc_otg_halt_st dwc_hc_t * _hc, dwc_otg_qtd_t * _qtd, dwc_otg_halt_st
+{ {
+ if (_hcd->core_if->dma_enable) { if (_hcd->core_if->dma_enable) {
+ release_channel(_hcd, _hc, _qtd, _halt_status release_channel(_hcd, _hc, _qtd, _halt_status
+ return; return;
+ } }
+
+ /* Slave mode processing... */ /* Slave mode processing... */
+ dwc_otg_hc_halt(_hcd->core_if, _hc, _halt_status); dwc_otg_hc_halt(_hcd->core_if, _hc, _halt_status);
+ if (_hc->halt_on_queue) { if (_hc->halt_on_queue) {
+ gintmsk_data_t gintmsk = {.d32 = 0}; gintmsk_data_t gintmsk = {.d32 = 0};
+ dwc_otg_core_global_regs_t * global_regs; dwc_otg_core_global_regs_t * global_regs;
+ global_regs = _hcd->core_if->core_global_regs global_regs = _hcd->core_if->core_global_regs
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL | if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL |
+ _hc->ep_type == DWC_OTG_EP_TYPE_BULK) _hc->ep_type == DWC_OTG_EP_TYPE_BULK)
+ /* /*
+ * Make sure the Non-periodic Tx FIFO emp * Make sure the Non-periodic Tx FIFO emp
+ * is enabled so that the non-periodic sc * is enabled so that the non-periodic sc
+ * be processed. * be processed.
+ */ */
+ gintmsk.b.nptxfempty = 1; gintmsk.b.nptxfempty = 1;
+ dwc_modify_reg32(&global_regs->gintms dwc_modify_reg32(&global_regs->gintms
+ } else { } else {
+ /* /*
+ * Move the QH from the periodic queued s * Move the QH from the periodic queued s
+ * the periodic assigned schedule. This a * the periodic assigned schedule. This a
+ * halt to be queued when the periodic sc * halt to be queued when the periodic sc
+ * processed. * processed.
+ */ */
+ list_move(&_hc->qh->qh_list_entry, list_move(&_hc->qh->qh_list_entry,
+ &_hcd->periodic_sched_assigned) &_hcd->periodic_sched_assigned)
+
+ /* /*
+ * Make sure the Periodic Tx FIFO Empty i * Make sure the Periodic Tx FIFO Empty i
+ * enabled so that the periodic schedule * enabled so that the periodic schedule
+ * processed. * processed.
+ */ */
+ gintmsk.b.ptxfempty = 1; gintmsk.b.ptxfempty = 1;
+ dwc_modify_reg32(&global_regs->gintms dwc_modify_reg32(&global_regs->gintms
+ } }
+ } }
+} }
+
+/** /**
+ * Performs common cleanup for non-periodic transfers after a * Performs common cleanup for non-periodic transfers after a
+ * Complete interrupt. This function should be called after a * Complete interrupt. This function should be called after a
+ * specific handling is finished to release the host channel. * specific handling is finished to release the host channel.
+ */ */
+static void complete_non_periodic_xfer(dwc_otg_hcd_t * _hcd, static void complete_non_periodic_xfer(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+ dwc_otg_halt_status_e _halt_status, int *must_free) dwc_otg_halt_status_e _halt_status, int *must_free)
+{ {
+ hcint_data_t hcint; hcint_data_t hcint;
+ _qtd->error_count = 0; _qtd->error_count = 0;
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint); hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ if (hcint.b.nyet) { if (hcint.b.nyet) {
+ /* /*
+ * Got a NYET on the last transaction of the tran * Got a NYET on the last transaction of the tran
+ * means that the endpoint should be in the PING * means that the endpoint should be in the PING
+ * beginning of the next transfer. * beginning of the next transfer.
+ */ */
+ _hc->qh->ping_state = 1; _hc->qh->ping_state = 1;
+ clear_hc_int(_hc_regs, nyet); clear_hc_int(_hc_regs, nyet);
+ } }
+
+ /* /*
+ * Always halt and release the host channel to make it av * Always halt and release the host channel to make it av
+ * more transfers. There may still be more phases for a c * more transfers. There may still be more phases for a c
+ * transfer or more data packets for a bulk transfer at t * transfer or more data packets for a bulk transfer at t
+ * but the host channel is still halted. A channel will b * but the host channel is still halted. A channel will b
+ * to the transfer when the non-periodic schedule is proc * to the transfer when the non-periodic schedule is proc
+ * the channel is released. This allows transactions to b * the channel is released. This allows transactions to b
+ * properly via dwc_otg_hcd_queue_transactions, which als * properly via dwc_otg_hcd_queue_transactions, which als
+ * Tx FIFO Empty interrupt if necessary. * Tx FIFO Empty interrupt if necessary.
+ */ */
+ if (_hc->ep_is_in) { if (_hc->ep_is_in) {
+ /* /*
+ * IN transfers in Slave mode require an explicit * IN transfers in Slave mode require an explicit
+ * halt the channel. (In DMA mode, this call simp * halt the channel. (In DMA mode, this call simp
+ * the channel.) * the channel.)
+ */ */
+ halt_channel(_hcd, _hc, _qtd, _halt_status, must_ halt_channel(_hcd, _hc, _qtd, _halt_status, must_
+ } else { } else {
+ /* /*
+ * The channel is automatically disabled by the c * The channel is automatically disabled by the c
+ * transfers in Slave mode. * transfers in Slave mode.
+ */ */
+ release_channel(_hcd, _hc, _qtd, _halt_status, mu release_channel(_hcd, _hc, _qtd, _halt_status, mu
+ } }
+} }
+
+/** /**
+ * Performs common cleanup for periodic transfers after a Tra * Performs common cleanup for periodic transfers after a Tra
+ * interrupt. This function should be called after any endpoi * interrupt. This function should be called after any endpoi
+ * handling is finished to release the host channel. * handling is finished to release the host channel.
+ */ */
+static void complete_periodic_xfer(dwc_otg_hcd_t * _hcd, static void complete_periodic_xfer(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+ dwc_otg_halt_status_e _halt_status, int *must_free) dwc_otg_halt_status_e _halt_status, int *must_free)
+{ {
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ _qtd->error_count = 0; _qtd->error_count = 0;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (!_hc->ep_is_in || hctsiz.b.pktcnt == 0) { if (!_hc->ep_is_in || hctsiz.b.pktcnt == 0) {
+ /* Core halts channel in these cases. */ /* Core halts channel in these cases. */
+ release_channel(_hcd, _hc, _qtd, _halt_status, mu release_channel(_hcd, _hc, _qtd, _halt_status, mu
+ } else { } else {
+ /* Flush any outstanding requests from the Tx que /* Flush any outstanding requests from the Tx que
+ halt_channel(_hcd, _hc, _qtd, _halt_status, must_ halt_channel(_hcd, _hc, _qtd, _halt_status, must_
+ } }
+} }
+
+/** /**
+ * Handles a host channel Transfer Complete interrupt. This h * Handles a host channel Transfer Complete interrupt. This h
+ * called in either DMA mode or Slave mode. * called in either DMA mode or Slave mode.
+ */ */
+static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ int urb_xfer_done; int urb_xfer_done;
+ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_C dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_C
+ struct urb *urb = _qtd->urb; struct urb *urb = _qtd->urb;
+ int pipe_type = usb_pipetype(urb->pipe); int pipe_type = usb_pipetype(urb->pipe);
+ int status = -EINPROGRESS; int status = -EINPROGRESS;
+
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Transfer Complete--\n", _hc->hc_num); "Transfer Complete--\n", _hc->hc_num);
+
+ /* /*
+ * Handle xfer complete on CSPLIT. * Handle xfer complete on CSPLIT.
+ */ */
+ if (_hc->qh->do_split) { if (_hc->qh->do_split) {
+ _qtd->complete_split = 0; _qtd->complete_split = 0;
+ } }
+
+ /* Update the QTD and URB states. */ /* Update the QTD and URB states. */
+ switch (pipe_type) { switch (pipe_type) {
+ case PIPE_CONTROL: case PIPE_CONTROL:
+ switch (_qtd->control_phase) { switch (_qtd->control_phase) {
+ case DWC_OTG_CONTROL_SETUP: case DWC_OTG_CONTROL_SETUP:
+ if (urb->transfer_buffer_length > 0) if (urb->transfer_buffer_length > 0)
+ _qtd->control_phase = DWC_OTG _qtd->control_phase = DWC_OTG
+ } else { } else {
+ _qtd->control_phase = DWC_OTG _qtd->control_phase = DWC_OTG
+ } }
+ DWC_DEBUGPL(DBG_HCDV, DWC_DEBUGPL(DBG_HCDV,
+ " Control setup transac " Control setup transac
+ halt_status = DWC_OTG_HC_XFER_COMPLET halt_status = DWC_OTG_HC_XFER_COMPLET
+ break; break;
+ case DWC_OTG_CONTROL_DATA:{ case DWC_OTG_CONTROL_DATA:{
+ urb_xfer_done = update_urb_state_xfer urb_xfer_done = update_urb_state_xfer
+ if (urb_xfer_done) { if (urb_xfer_done) {
+ _qtd->control_phase = _qtd->control_phase =
+ DWC_DEBUGPL(DBG_HCDV, DWC_DEBUGPL(DBG_HCDV,
+ } else { } else {
+ save_data_toggle(_hc, save_data_toggle(_hc,
+ } }
+ halt_status = DWC_OTG_HC_XFER halt_status = DWC_OTG_HC_XFER
+ break; break;
+ } }
+ case DWC_OTG_CONTROL_STATUS: case DWC_OTG_CONTROL_STATUS:
+ DWC_DEBUGPL(DBG_HCDV, " Control tran DWC_DEBUGPL(DBG_HCDV, " Control tran
+ if (status == -EINPROGRESS) { if (status == -EINPROGRESS) {
+ status = 0; status = 0;
+ } }
+ dwc_otg_hcd_complete_urb(_hcd, urb, s dwc_otg_hcd_complete_urb(_hcd, urb, s
+ halt_status = DWC_OTG_HC_XFER_URB_COM halt_status = DWC_OTG_HC_XFER_URB_COM
+ break; break;
+ } }
+ complete_non_periodic_xfer(_hcd, _hc, _hc_reg complete_non_periodic_xfer(_hcd, _hc, _hc_reg
+ halt_status, mus halt_status, mus
+ break; break;
+ case PIPE_BULK: case PIPE_BULK:
+ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer comple DWC_DEBUGPL(DBG_HCDV, " Bulk transfer comple
+ urb_xfer_done = update_urb_state_xfer_comp(_h urb_xfer_done = update_urb_state_xfer_comp(_h
+ if (urb_xfer_done) { if (urb_xfer_done) {
+ dwc_otg_hcd_complete_urb(_hcd, urb, s dwc_otg_hcd_complete_urb(_hcd, urb, s
+ halt_status = DWC_OTG_HC_XFER_URB_COM halt_status = DWC_OTG_HC_XFER_URB_COM
+ } else { } else {
+ halt_status = DWC_OTG_HC_XFER_COMPLET halt_status = DWC_OTG_HC_XFER_COMPLET
+ } }
+ save_data_toggle(_hc, _hc_regs, _qtd); save_data_toggle(_hc, _hc_regs, _qtd);
+ complete_non_periodic_xfer(_hcd, _hc, _hc_reg complete_non_periodic_xfer(_hcd, _hc, _hc_reg
+ break; break;
+ case PIPE_INTERRUPT: case PIPE_INTERRUPT:
+ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer c DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer c
+ update_urb_state_xfer_comp(_hc, _hc_regs, urb update_urb_state_xfer_comp(_hc, _hc_regs, urb
+ /* /*
+ * Interrupt URB is done on the first transfer co * Interrupt URB is done on the first transfer co
+ * interrupt. * interrupt.
+ */ */
+ dwc_otg_hcd_complete_urb(_hcd, urb, status); dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ save_data_toggle(_hc, _hc_regs, _qtd); save_data_toggle(_hc, _hc_regs, _qtd);
+ complete_periodic_xfer(_hcd, _hc, _hc_regs, _ complete_periodic_xfer(_hcd, _hc, _hc_regs, _
+ DWC_OTG_HC_XFER_URB_C DWC_OTG_HC_XFER_URB_C
+ break; break;
+ case PIPE_ISOCHRONOUS: case PIPE_ISOCHRONOUS:
+ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer
+ if (_qtd->isoc_split_pos == DWC_HCSPLIT_XACTP if (_qtd->isoc_split_pos == DWC_HCSPLIT_XACTP
+ halt_status = update_isoc_urb_state(_ halt_status = update_isoc_urb_state(_
+ DWC DWC
+ } }
+ complete_periodic_xfer(_hcd, _hc, _hc_regs, _ complete_periodic_xfer(_hcd, _hc, _hc_regs, _
+ break; break;
+ } }
+ disable_hc_int(_hc_regs, xfercompl); disable_hc_int(_hc_regs, xfercompl);
+ return 1; return 1;
+} }
+
+/** /**
+ * Handles a host channel STALL interrupt. This handler may b * Handles a host channel STALL interrupt. This handler may b
+ * either DMA mode or Slave mode. * either DMA mode or Slave mode.
+ */ */
+static int32_t handle_hc_stall_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_stall_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ struct urb *urb = _qtd->urb; struct urb *urb = _qtd->urb;
+ int pipe_type = usb_pipetype(urb->pipe); int pipe_type = usb_pipetype(urb->pipe);
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "STALL Received--\n", _hc->hc_num); "STALL Received--\n", _hc->hc_num);
+ if (pipe_type == PIPE_CONTROL) { if (pipe_type == PIPE_CONTROL) {
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EP dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EP
+ } }
+ if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTER if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTER
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EP dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EP
+ /* /*
+ * USB protocol requires resetting the data toggl * USB protocol requires resetting the data toggl
+ * and interrupt endpoints when a CLEAR_FEATURE(E * and interrupt endpoints when a CLEAR_FEATURE(E
+ * setup command is issued to the endpoint. Antic * setup command is issued to the endpoint. Antic
+ * CLEAR_FEATURE command since a STALL has occurr * CLEAR_FEATURE command since a STALL has occurr
+ * the data toggle now. * the data toggle now.
+ */ */
+ _hc->qh->data_toggle = 0; _hc->qh->data_toggle = 0;
+ } }
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_STALL, halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_STALL,
+ disable_hc_int(_hc_regs, stall); disable_hc_int(_hc_regs, stall);
+ return 1; return 1;
+} }
+
+/* /*
+ * Updates the state of the URB when a transfer has been stop * Updates the state of the URB when a transfer has been stop
+ * abnormal condition before the transfer completes. Modifies * abnormal condition before the transfer completes. Modifies
+ * actual_length field of the URB to reflect the number of by * actual_length field of the URB to reflect the number of by
+ * actually been transferred via the host channel. * actually been transferred via the host channel.
+ */ */
+static void update_urb_state_xfer_intr(dwc_hc_t * _hc, static void update_urb_state_xfer_intr(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, struct urb *_urb, dwc_o dwc_otg_hc_regs_t * _hc_regs, struct urb *_urb, dwc_o
+ dwc_otg_halt_status_e _halt_status) dwc_otg_halt_status_e _halt_status)
+{ {
+ uint32_t bytes_transferred = uint32_t bytes_transferred =
+ get_actual_xfer_length(_hc, _hc_regs, _qtd, _halt get_actual_xfer_length(_hc, _hc_regs, _qtd, _halt
+ _urb->actual_length += bytes_transferred; _urb->actual_length += bytes_transferred;
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+ { {
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz
+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, chann DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, chann
+ __func__, (_hc->ep_is_in ? "IN" __func__, (_hc->ep_is_in ? "IN"
+ DWC_DEBUGPL(DBG_HCDV, " _hc->start_pkt_count DWC_DEBUGPL(DBG_HCDV, " _hc->start_pkt_count
+ _hc->start_pkt_count); _hc->start_pkt_count);
+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n",
+ DWC_DEBUGPL(DBG_HCDV, " _hc->max_packet %d\n DWC_DEBUGPL(DBG_HCDV, " _hc->max_packet %d\n
+ DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d
+ bytes_transferred); bytes_transferred);
+ DWC_DEBUGPL(DBG_HCDV, " _urb->actual_length DWC_DEBUGPL(DBG_HCDV, " _urb->actual_length
+ _urb->actual_length); _urb->actual_length);
+ DWC_DEBUGPL(DBG_HCDV, " _urb->transfer_buffe DWC_DEBUGPL(DBG_HCDV, " _urb->transfer_buffe
+ _urb->transfer_buffer_length); _urb->transfer_buffer_length);
+ } }
+#endif /* */ #endif /* */
+} }
+
+/** /**
+ * Handles a host channel NAK interrupt. This handler may be * Handles a host channel NAK interrupt. This handler may be
+ * DMA mode or Slave mode. * DMA mode or Slave mode.
+ */ */
+static int32_t handle_hc_nak_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_nak_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "NAK Received--\n", _hc->hc_num); "NAK Received--\n", _hc->hc_num);
+ /* /*
+ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, c * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, c
+ * interrupt. Re-start the SSPLIT transfer. * interrupt. Re-start the SSPLIT transfer.
+ */ */
+ if (_hc->do_split) { if (_hc->do_split) {
+ if (_hc->complete_split) { if (_hc->complete_split) {
+ _qtd->error_count = 0; _qtd->error_count = 0;
+ } }
+ _qtd->complete_split = 0; _qtd->complete_split = 0;
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER
+ goto handle_nak_done; goto handle_nak_done;
+ } }
+ switch (usb_pipetype(_qtd->urb->pipe)) { switch (usb_pipetype(_qtd->urb->pipe)) {
+ case PIPE_CONTROL: case PIPE_CONTROL:
+ case PIPE_BULK: case PIPE_BULK:
+ if (_hcd->core_if->dma_enable && _hc->ep_is_i if (_hcd->core_if->dma_enable && _hc->ep_is_i
+ /* /*
+ * NAK interrupts are enabled on bulk/con * NAK interrupts are enabled on bulk/con
+ * transfers in DMA mode for the sole pur * transfers in DMA mode for the sole pur
+ * resetting the error count after a tran * resetting the error count after a tran
+ * occurs. The core will continue transfe * occurs. The core will continue transfe
+ */ */
+ _qtd->error_count = 0; _qtd->error_count = 0;
+ goto handle_nak_done; goto handle_nak_done;
+ } }
+
+ /* /*
+ * NAK interrupts normally occur during OUT trans * NAK interrupts normally occur during OUT trans
+ * or Slave mode. For IN transfers, more requests * or Slave mode. For IN transfers, more requests
+ * queued as request queue space is available. * queued as request queue space is available.
+ */ */
+ _qtd->error_count = 0; _qtd->error_count = 0;
+ if (!_hc->qh->ping_state) { if (!_hc->qh->ping_state) {
+ update_urb_state_xfer_intr(_hc, _hc_r update_urb_state_xfer_intr(_hc, _hc_r
+ _qtd, DWC _qtd, DWC
+ save_data_toggle(_hc, _hc_regs, _qtd) save_data_toggle(_hc, _hc_regs, _qtd)
+ if (_qtd->urb->dev->speed == USB_SPEE if (_qtd->urb->dev->speed == USB_SPEE
+ _hc->qh->ping_state = 1; _hc->qh->ping_state = 1;
+ } }
+ } }
+
+ /* /*
+ * Halt the channel so the transfer can be re-sta * Halt the channel so the transfer can be re-sta
+ * the appropriate point or the PING protocol wil * the appropriate point or the PING protocol wil
+ * start/continue. * start/continue.
+ */ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK
+ break; break;
+ case PIPE_INTERRUPT: case PIPE_INTERRUPT:
+ _qtd->error_count = 0; _qtd->error_count = 0;
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER
+ break; break;
+ case PIPE_ISOCHRONOUS: case PIPE_ISOCHRONOUS:
+ /* Should never get called for isochronous transf /* Should never get called for isochronous transf
+ BUG(); BUG();
+ break; break;
+ } }
+ handle_nak_done:disable_hc_int(_hc_regs, nak); handle_nak_done:disable_hc_int(_hc_regs, nak);
+ > clear_hc_int(_hc_regs, nak);
+ return 1; return 1;
+} }
+
+/** /**
+ * Handles a host channel ACK interrupt. This interrupt is en * Handles a host channel ACK interrupt. This interrupt is en
+ * performing the PING protocol in Slave mode, when errors oc * performing the PING protocol in Slave mode, when errors oc
+ * either Slave mode or DMA mode, and during Start Split tran * either Slave mode or DMA mode, and during Start Split tran
+ */ */
+static int32_t handle_hc_ack_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_ack_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "ACK Received--\n", _hc->hc_num); "ACK Received--\n", _hc->hc_num);
+ if (_hc->do_split) { if (_hc->do_split) {
+ /* /*
+ * Handle ACK on SSPLIT. * Handle ACK on SSPLIT.
+ * ACK should not occur in CSPLIT. * ACK should not occur in CSPLIT.
+ */ */
+ if ((!_hc->ep_is_in) && (_hc->data_pid_start != D if ((!_hc->ep_is_in) && (_hc->data_pid_start != D
+ _qtd->ssplit_out_xfer_count = _hc->xf _qtd->ssplit_out_xfer_count = _hc->xf
+ } }
+ if (!(_hc->ep_type == DWC_OTG_EP_TYPE_ISOC && if (!(_hc->ep_type == DWC_OTG_EP_TYPE_ISOC &&
+ /* Don't need complete for isochr /* Don't need complete for isochr
+ _qtd->complete_split = 1; _qtd->complete_split = 1;
+ } }
+
+ /* ISOC OUT */ /* ISOC OUT */
+ if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && !_h if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && !_h
+ switch (_hc->xact_pos) { switch (_hc->xact_pos) {
+ case DWC_HCSPLIT_XACTPOS_ALL: case DWC_HCSPLIT_XACTPOS_ALL:
+ break; break;
+ case DWC_HCSPLIT_XACTPOS_END: case DWC_HCSPLIT_XACTPOS_END:
+ _qtd->isoc_split_pos = DWC_HC _qtd->isoc_split_pos = DWC_HC
+ _qtd->isoc_split_offset = 0; _qtd->isoc_split_offset = 0;
+ break; break;
+ case DWC_HCSPLIT_XACTPOS_BEGIN: case DWC_HCSPLIT_XACTPOS_BEGIN:
+ case DWC_HCSPLIT_XACTPOS_MID: case DWC_HCSPLIT_XACTPOS_MID:
+ /* /*
+ * For BEGIN or MID, calculate th * For BEGIN or MID, calculate th
+ * the next microframe to determi * the next microframe to determi
+ * SSPLIT token, either MID or EN * SSPLIT token, either MID or EN
+ */ */
+ do { do {
+ struct usb_iso_packet struct usb_iso_packet
+ frame_desc = &_qtd->u frame_desc = &_qtd->u
+ _qtd->isoc_split_offs _qtd->isoc_split_offs
+ if ((frame_desc->leng if ((frame_desc->leng
+ _qtd->isoc_sp _qtd->isoc_sp
+ } else { } else {
+ _qtd->isoc_sp _qtd->isoc_sp
+ } }
+ } while (0); } while (0);
+ break; break;
+ } }
+ } else { } else {
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG halt_channel(_hcd, _hc, _qtd, DWC_OTG
+ } }
+ } else { } else {
+ _qtd->error_count = 0; _qtd->error_count = 0;
+ if (_hc->qh->ping_state) { if (_hc->qh->ping_state) {
+ _hc->qh->ping_state = 0; _hc->qh->ping_state = 0;
+
+ /* /*
+ * Halt the channel so the transfer can b * Halt the channel so the transfer can b
+ * from the appropriate point. This only * from the appropriate point. This only
+ * Slave mode. In DMA mode, the ping_stat * Slave mode. In DMA mode, the ping_stat
+ * when the transfer is started because t * when the transfer is started because t
+ * automatically executes the PING, then * automatically executes the PING, then
+ */ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_
+ } else { <
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_s <
+ } }
+ } }
+
+ /* /*
+ * If the ACK occurred when _not_ in the PING state, let * If the ACK occurred when _not_ in the PING state, let
+ * continue transferring data after clearing the error co * continue transferring data after clearing the error co
+ */ */
+ disable_hc_int(_hc_regs, ack); disable_hc_int(_hc_regs, ack);
+ > clear_hc_int(_hc_regs, ack);
+ return 1; return 1;
+} }
+
+/** /**
+ * Handles a host channel NYET interrupt. This interrupt shou * Handles a host channel NYET interrupt. This interrupt shou
+ * Bulk and Control OUT endpoints and for complete split tran * Bulk and Control OUT endpoints and for complete split tran
+ * NYET occurs at the same time as a Transfer Complete interr * NYET occurs at the same time as a Transfer Complete interr
+ * handled in the xfercomp interrupt handler, not here. This * handled in the xfercomp interrupt handler, not here. This
+ * called in either DMA mode or Slave mode. * called in either DMA mode or Slave mode.
+ */ */
+static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "NYET Received--\n", _hc->hc_num); "NYET Received--\n", _hc->hc_num);
+
+ /* /*
+ * NYET on CSPLIT * NYET on CSPLIT
+ * re-do the CSPLIT immediately on non-periodic * re-do the CSPLIT immediately on non-periodic
+ */ */
+ if ((_hc->do_split) && (_hc->complete_split)) { if ((_hc->do_split) && (_hc->complete_split)) {
+ if ((_hc->ep_type == DWC_OTG_EP_TYPE_INTR) || if ((_hc->ep_type == DWC_OTG_EP_TYPE_INTR) ||
+ (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC)) (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC))
+ int frnum = dwc_otg_hcd_get_frame_num int frnum = dwc_otg_hcd_get_frame_num
+ (_hc (_hc
+ if (dwc_full_frame_num(frnum) != if (dwc_full_frame_num(frnum) !=
+ dwc_full_frame_num(_hc->qh->sch dwc_full_frame_num(_hc->qh->sch
+
+ /* /*
+ * No longer in the same full spe * No longer in the same full spe
+ * Treat this as a transaction er * Treat this as a transaction er
+ */ */
+#if 0 #if 0
+ /** @todo Fix system performa /** @todo Fix system performa
+ * be treated as an error. Ri * be treated as an error. Ri
+ * splits cannot be scheduled * splits cannot be scheduled
+ * due to other system activi * due to other system activi
+ * occurs regularly in Slave * occurs regularly in Slave
+ */ */
+ _qtd->error_count++; _qtd->error_count++;
+
+#endif /* */ #endif /* */
+ _qtd->complete_split = 0; _qtd->complete_split = 0;
+ halt_channel(_hcd, _hc, _qtd, halt_channel(_hcd, _hc, _qtd,
+
+ /** @todo add support for iso /** @todo add support for iso
+ goto handle_nyet_done; goto handle_nyet_done;
+ } }
+ } }
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER
+ goto handle_nyet_done; goto handle_nyet_done;
+ } }
+ _hc->qh->ping_state = 1; _hc->qh->ping_state = 1;
+ _qtd->error_count = 0; _qtd->error_count = 0;
+ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb, update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb,
+ DWC_OTG_HC_XFER_NYET); DWC_OTG_HC_XFER_NYET);
+ save_data_toggle(_hc, _hc_regs, _qtd); save_data_toggle(_hc, _hc_regs, _qtd);
+
+ /* /*
+ * Halt the channel and re-start the transfer so the PING * Halt the channel and re-start the transfer so the PING
+ * protocol will start. * protocol will start.
+ */ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_
+handle_nyet_done: handle_nyet_done:
+ disable_hc_int(_hc_regs, nyet); disable_hc_int(_hc_regs, nyet);
+ clear_hc_int(_hc_regs, nyet); clear_hc_int(_hc_regs, nyet);
+
+ return 1; return 1;
+} }
+
+/** /**
+ * Handles a host channel babble interrupt. This handler may * Handles a host channel babble interrupt. This handler may
+ * either DMA mode or Slave mode. * either DMA mode or Slave mode.
+ */ */
+static int32_t handle_hc_babble_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_babble_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Babble Error--\n", _hc->hc_num); "Babble Error--\n", _hc->hc_num);
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { if (_hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EO dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EO
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER
+ } else { } else {
+ dwc_otg_halt_status_e halt_status; dwc_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(_hcd, _hc halt_status = update_isoc_urb_state(_hcd, _hc
+ DWC_OTG_HC_XFER_BAB DWC_OTG_HC_XFER_BAB
+ halt_channel(_hcd, _hc, _qtd, halt_status, mu halt_channel(_hcd, _hc, _qtd, halt_status, mu
+ } }
+ disable_hc_int(_hc_regs, bblerr); disable_hc_int(_hc_regs, bblerr);
+ return 1; return 1;
+} }
+
+/** /**
+ * Handles a host channel AHB error interrupt. This handler i * Handles a host channel AHB error interrupt. This handler i
+ * DMA mode. * DMA mode.
+ */ */
+static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ hcchar_data_t hcchar; hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt; hcsplt_data_t hcsplt;
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ uint32_t hcdma; uint32_t hcdma;
+ struct urb *urb = _qtd->urb; struct urb *urb = _qtd->urb;
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "AHB Error--\n", _hc->hc_num); "AHB Error--\n", _hc->hc_num);
+ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+ hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt); hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt);
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz); hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ hcdma = dwc_read_reg32(&_hc_regs->hcdma); hcdma = dwc_read_reg32(&_hc_regs->hcdma);
+ DWC_ERROR("AHB ERROR, Channel %d\n", _hc->hc_num); DWC_ERROR("AHB ERROR, Channel %d\n", _hc->hc_num);
+ DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar. DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.
+ DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n"); DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n");
+ DWC_ERROR(" Device address: %d\n", usb_pipedevice(ur DWC_ERROR(" Device address: %d\n", usb_pipedevice(ur
+ DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(ur DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(ur
+ (usb_pipein(urb->pipe) ? "IN" : "OUT" (usb_pipein(urb->pipe) ? "IN" : "OUT"
+ DWC_ERROR(" Endpoint type: %s\n", ( { DWC_ERROR(" Endpoint type: %s\n", ( {
+ char *pipetype; char *pipetype;
+ switch (usb_pipetype(urb->pipe)) { switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL: case PIPE_CONTROL:
+ pipetype = "CONTROL"; break; pipetype = "CONTROL"; break;
+ case PIPE_BULK: case PIPE_BULK:
+ pipetype = "BULK"; break; pipetype = "BULK"; break;
+ case PIPE_INTERRUPT: case PIPE_INTERRUPT:
+ pipetype = "INTERRUPT"; break pipetype = "INTERRUPT"; break
+ case PIPE_ISOCHRONOUS: case PIPE_ISOCHRONOUS:
+ pipetype = "ISOCHRONOUS"; bre pipetype = "ISOCHRONOUS"; bre
+ default: default:
+ pipetype = "UNKNOWN"; break; pipetype = "UNKNOWN"; break;
+ }; };
+ pipetype; pipetype;
+ } )) ; } )) ;
+ DWC_ERROR(" Speed: %s\n", ( { DWC_ERROR(" Speed: %s\n", ( {
+ char *speed; char *speed;
+ switch (urb->dev->speed) { switch (urb->dev->speed) {
+ case USB_SPEED_HIGH: case USB_SPEED_HIGH:
+ speed = "HIGH"; break; speed = "HIGH"; break;
+ case USB_SPEED_FULL: case USB_SPEED_FULL:
+ speed = "FULL"; break; speed = "FULL"; break;
+ case USB_SPEED_LOW: case USB_SPEED_LOW:
+ speed = "LOW"; break; speed = "LOW"; break;
+ default: default:
+ speed = "UNKNOWN"; break; speed = "UNKNOWN"; break;
+ }; };
+ speed; speed;
+ } )) ; } )) ;
+ DWC_ERROR(" Max packet size: %d\n", DWC_ERROR(" Max packet size: %d\n",
+ usb_maxpacket(urb->dev, urb->pipe, usb_pip usb_maxpacket(urb->dev, urb->pipe, usb_pip
+ DWC_ERROR(" Data buffer length: %d\n", urb->transfer DWC_ERROR(" Data buffer length: %d\n", urb->transfer
+ DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n" DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n"
+ urb->transfer_buffer, (void *)(u32)urb->tra urb->transfer_buffer, (void *)(u32)urb->tra
+ DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n", urb- DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n", urb-
+ (void *)(u32)urb->setup_dma); (void *)(u32)urb->setup_dma);
+ DWC_ERROR(" Interval: %d\n", urb->interval); DWC_ERROR(" Interval: %d\n", urb->interval);
+ dwc_otg_hcd_complete_urb(_hcd, urb, -EIO); dwc_otg_hcd_complete_urb(_hcd, urb, -EIO);
+
+ /* /*
+ * Force a channel halt. Don't call halt_channel because * Force a channel halt. Don't call halt_channel because
+ * write to the HCCHARn register in DMA mode to force the * write to the HCCHARn register in DMA mode to force the
+ */ */
+ dwc_otg_hc_halt(_hcd->core_if, _hc, DWC_OTG_HC_XFER_AHB_E dwc_otg_hc_halt(_hcd->core_if, _hc, DWC_OTG_HC_XFER_AHB_E
+ disable_hc_int(_hc_regs, ahberr); disable_hc_int(_hc_regs, ahberr);
+ return 1; return 1;
+} }
+
+/** /**
+ * Handles a host channel transaction error interrupt. This h * Handles a host channel transaction error interrupt. This h
+ * called in either DMA mode or Slave mode. * called in either DMA mode or Slave mode.
+ */ */
+static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Transaction Error--\n", _hc->hc_num); "Transaction Error--\n", _hc->hc_num);
+ switch (usb_pipetype(_qtd->urb->pipe)) { switch (usb_pipetype(_qtd->urb->pipe)) {
+ case PIPE_CONTROL: case PIPE_CONTROL:
+ case PIPE_BULK: case PIPE_BULK:
+ _qtd->error_count++; _qtd->error_count++;
+ if (!_hc->qh->ping_state) { if (!_hc->qh->ping_state) {
+ update_urb_state_xfer_intr(_hc, _hc_r update_urb_state_xfer_intr(_hc, _hc_r
+ _qtd, DWC_OTG_HC_XFER_XAC _qtd, DWC_OTG_HC_XFER_XAC
+ save_data_toggle(_hc, _hc_regs, _qtd) save_data_toggle(_hc, _hc_regs, _qtd)
+ if (!_hc->ep_is_in && _qtd->urb->dev- if (!_hc->ep_is_in && _qtd->urb->dev-
+ _hc->qh->ping_state = 1; _hc->qh->ping_state = 1;
+ } }
+ } }
+
+ /* /*
+ * Halt the channel so the transfer can be re-sta * Halt the channel so the transfer can be re-sta
+ * the appropriate point or the PING protocol wil * the appropriate point or the PING protocol wil
+ */ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XAC halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XAC
+ break; break;
+ case PIPE_INTERRUPT: case PIPE_INTERRUPT:
+ _qtd->error_count++; _qtd->error_count++;
+ if ((_hc->do_split) && (_hc->complete_split)) if ((_hc->do_split) && (_hc->complete_split))
+ _qtd->complete_split = 0; _qtd->complete_split = 0;
+ } }
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER
+ break; break;
+ case PIPE_ISOCHRONOUS: case PIPE_ISOCHRONOUS:
+ { {
+ dwc_otg_halt_status_e halt_status; dwc_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(_ halt_status = update_isoc_urb_state(_
+ DWC_OTG_HC_ DWC_OTG_HC_
+ halt_channel(_hcd, _hc, _qtd, halt_st halt_channel(_hcd, _hc, _qtd, halt_st
+ } }
+ break; break;
+ } }
+ disable_hc_int(_hc_regs, xacterr); disable_hc_int(_hc_regs, xacterr);
+ return 1; return 1;
+} }
+
+/** /**
+ * Handles a host channel frame overrun interrupt. This handl * Handles a host channel frame overrun interrupt. This handl
+ * in either DMA mode or Slave mode. * in either DMA mode or Slave mode.
+ */ */
+static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Frame Overrun--\n", _hc->hc_num); "Frame Overrun--\n", _hc->hc_num);
+ switch (usb_pipetype(_qtd->urb->pipe)) { switch (usb_pipetype(_qtd->urb->pipe)) {
+ case PIPE_CONTROL: case PIPE_CONTROL:
+ case PIPE_BULK: case PIPE_BULK:
+ break; break;
+ case PIPE_INTERRUPT: case PIPE_INTERRUPT:
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER
+ break; break;
+ case PIPE_ISOCHRONOUS: case PIPE_ISOCHRONOUS:
+ { {
+ dwc_otg_halt_status_e halt_status; dwc_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(_ halt_status = update_isoc_urb_state(_
+ DWC_OTG_HC_ DWC_OTG_HC_
+ halt_channel(_hcd, _hc, _qtd, halt_st halt_channel(_hcd, _hc, _qtd, halt_st
+ } }
+ break; break;
+ } }
+ disable_hc_int(_hc_regs, frmovrun); disable_hc_int(_hc_regs, frmovrun);
+ return 1; return 1;
+} }
+
+/** /**
+ * Handles a host channel data toggle error interrupt. This h * Handles a host channel data toggle error interrupt. This h
+ * called in either DMA mode or Slave mode. * called in either DMA mode or Slave mode.
+ */ */
+static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t * _hcd static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t * _hcd
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Data Toggle Error--\n", _hc->hc_num); "Data Toggle Error--\n", _hc->hc_num);
+ if (_hc->ep_is_in) { if (_hc->ep_is_in) {
+ _qtd->error_count = 0; _qtd->error_count = 0;
+ } else { } else {
+ DWC_ERROR("Data Toggle Error on OUT transfer, DWC_ERROR("Data Toggle Error on OUT transfer,
+ "channel %d\n", _hc->hc_num); "channel %d\n", _hc->hc_num);
+ } }
+ disable_hc_int(_hc_regs, datatglerr); disable_hc_int(_hc_regs, datatglerr);
+ return 1; return 1;
+} }
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+/** /**
+ * This function is for debug only. It checks that a valid ha * This function is for debug only. It checks that a valid ha
+ * and that HCCHARn.chdis is clear. If there's a problem, cor * and that HCCHARn.chdis is clear. If there's a problem, cor
+ * taken and a warning is issued. * taken and a warning is issued.
+ * @return 1 if halt status is ok, 0 otherwise. * @return 1 if halt status is ok, 0 otherwise.
+ */ */
+static inline int halt_status_ok(dwc_otg_hcd_t * _hcd, static inline int halt_status_ok(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg | dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ hcchar_data_t hcchar; hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz; hctsiz_data_t hctsiz;
+ hcint_data_t hcint; hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk; hcintmsk_data_t hcintmsk;
+ hcsplt_data_t hcsplt; hcsplt_data_t hcsplt;
+ if (_hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATU if (_hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATU
+ /* /*
+ * This code is here only as a check. This condit * This code is here only as a check. This condit
+ * never happen. Ignore the halt if it does occur * never happen. Ignore the halt if it does occur
+ */ */
+ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint); hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcin hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcin
+ hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt
+ DWC_WARN("%s: _hc->halt_status == DWC_OTG_HC_ DWC_WARN("%s: _hc->halt_status == DWC_OTG_HC_
+ "channel %d, hcchar 0x%08x, hctsiz 0x%0 "channel %d, hcchar 0x%08x, hctsiz 0x%0
+ "hcint 0x%08x, hcintmsk 0x%08x, " "hcint 0x%08x, hcintmsk 0x%08x, "
+ "hcsplt 0x%08x, qtd->complete_split %d\n "hcsplt 0x%08x, qtd->complete_split %d\n
+ _hc->hc_num, hcchar.d32, hctsiz.d32, hci _hc->hc_num, hcchar.d32, hctsiz.d32, hci
+ hcintmsk.d32, hcsplt.d32, _qtd->complete hcintmsk.d32, hcsplt.d32, _qtd->complete
+ DWC_WARN("%s: no halt status, channel %d, ign DWC_WARN("%s: no halt status, channel %d, ign
+ __func__, _hc->hc_num); __func__, _hc->hc_num);
+ DWC_WARN("\n"); DWC_WARN("\n");
+ clear_hc_int(_hc_regs, chhltd); clear_hc_int(_hc_regs, chhltd);
+ return 0; return 0;
+ } }
+
+ /* /*
+ * This code is here only as a check. hcchar.chdis should * This code is here only as a check. hcchar.chdis should
+ * never be set when the halt interrupt occurs. Halt the * never be set when the halt interrupt occurs. Halt the
+ * channel again if it does occur. * channel again if it does occur.
+ */ */
+ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar); hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+ if (hcchar.b.chdis) { if (hcchar.b.chdis) {
+ DWC_WARN("%s: hcchar.chdis set unexpectedly, DWC_WARN("%s: hcchar.chdis set unexpectedly,
+ "hcchar 0x%08x, trying to halt agai "hcchar 0x%08x, trying to halt agai
+ hcchar.d32); hcchar.d32);
+ clear_hc_int(_hc_regs, chhltd); clear_hc_int(_hc_regs, chhltd);
+ _hc->halt_pending = 0; _hc->halt_pending = 0;
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_statu halt_channel(_hcd, _hc, _qtd, _hc->halt_statu
+ return 0; return 0;
+ } }
+ return 1; return 1;
+} }
+#endif /* */ #endif /* */
+
+/** /**
+ * Handles a host Channel Halted interrupt in DMA mode. This * Handles a host Channel Halted interrupt in DMA mode. This
+ * determines the reason the channel halted and proceeds acco * determines the reason the channel halted and proceeds acco
+ */ */
+static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t * _hcd, static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ hcint_data_t hcint; hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk; hcintmsk_data_t hcintmsk;
+ if (_hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE | if (_hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE |
+ _hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) _hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR)
+ /* /*
+ * Just release the channel. A dequeue can happen * Just release the channel. A dequeue can happen
+ * transfer timeout. In the case of an AHB Error, * transfer timeout. In the case of an AHB Error,
+ * was forced to halt because there's no way to g * was forced to halt because there's no way to g
+ * recover. * recover.
+ */ */
+ release_channel(_hcd, _hc, _qtd, _hc->halt_status release_channel(_hcd, _hc, _qtd, _hc->halt_status
+ return; return;
+ } }
+
+ /* Read the HCINTn register to determine the cause for th /* Read the HCINTn register to determine the cause for th
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint); hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk); hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk);
+ if (hcint.b.xfercomp) { if (hcint.b.xfercomp) {
+
+ /** @todo This is here because of a possible /** @todo This is here because of a possible
+ * says that on SPLIT-ISOC OUT transfers in D * says that on SPLIT-ISOC OUT transfers in D
+ * interrupt w/ACK bit set should occur, but * interrupt w/ACK bit set should occur, but
+ * XFERCOMP bit, even with it masked out. Th * XFERCOMP bit, even with it masked out. Th
+ * for that behavior. Should fix this when h * for that behavior. Should fix this when h
+ */ */
+ if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && (!_ if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && (!_
+ handle_hc_ack_intr(_hcd, _hc, _hc_reg handle_hc_ack_intr(_hcd, _hc, _hc_reg
+ } }
+ handle_hc_xfercomp_intr(_hcd, _hc, _hc_regs, handle_hc_xfercomp_intr(_hcd, _hc, _hc_regs,
+ } else if (hcint.b.stall) { } else if (hcint.b.stall) {
+ handle_hc_stall_intr(_hcd, _hc, _hc_regs, _qt handle_hc_stall_intr(_hcd, _hc, _hc_regs, _qt
+ } else if (hcint.b.xacterr) { } else if (hcint.b.xacterr) {
+ /* /*
+ * Must handle xacterr before nak or ack. Could g * Must handle xacterr before nak or ack. Could g
+ * at the same time as either of these on a BULK/ * at the same time as either of these on a BULK/
+ * that started with a PING. The xacterr takes pr * that started with a PING. The xacterr takes pr
+ */ */
+ handle_hc_xacterr_intr(_hcd, _hc, _hc_regs, _qtd, handle_hc_xacterr_intr(_hcd, _hc, _hc_regs, _qtd,
+ } else if (hcint.b.nyet) { } else if (hcint.b.nyet) {
+ /* /*
+ * Must handle nyet before nak or ack. Could get * Must handle nyet before nak or ack. Could get
+ * same time as either of those on a BULK/CONTROL * same time as either of those on a BULK/CONTROL
+ * started with a PING. The nyet takes precedence * started with a PING. The nyet takes precedence
+ */ */
+ handle_hc_nyet_intr(_hcd, _hc, _hc_regs, _qtd, mu handle_hc_nyet_intr(_hcd, _hc, _hc_regs, _qtd, mu
+ } else if (hcint.b.bblerr) { } else if (hcint.b.bblerr) {
+ handle_hc_babble_intr(_hcd, _hc, _hc_regs, _q handle_hc_babble_intr(_hcd, _hc, _hc_regs, _q
+ } else if (hcint.b.frmovrun) { } else if (hcint.b.frmovrun) {
+ handle_hc_frmovrun_intr(_hcd, _hc, _hc_regs, handle_hc_frmovrun_intr(_hcd, _hc, _hc_regs,
+ } else if (hcint.b.datatglerr) { } else if (hcint.b.datatglerr) {
+ handle_hc_datatglerr_intr(_hcd, _hc, _hc_regs handle_hc_datatglerr_intr(_hcd, _hc, _hc_regs
+ _hc->qh->data_toggle = 0; <
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_statu <
+ } else if (hcint.b.nak && !hcintmsk.b.nak) { } else if (hcint.b.nak && !hcintmsk.b.nak) {
+ /* /*
+ * If nak is not masked, it's because a non-split * If nak is not masked, it's because a non-split
+ * is in an error state. In that case, the nak is * is in an error state. In that case, the nak is
+ * the nak interrupt handler, not here. Handle na * the nak interrupt handler, not here. Handle na
+ * BULK/CONTROL OUT transfers, which halt on a NA * BULK/CONTROL OUT transfers, which halt on a NA
+ * rewinding the buffer pointer. * rewinding the buffer pointer.
+ */ */
+ handle_hc_nak_intr(_hcd, _hc, _hc_regs, _qtd, mus handle_hc_nak_intr(_hcd, _hc, _hc_regs, _qtd, mus
+ } else if (hcint.b.ack && !hcintmsk.b.ack) { } else if (hcint.b.ack && !hcintmsk.b.ack) {
+ /* /*
+ * If ack is not masked, it's because a non-split * If ack is not masked, it's because a non-split
+ * is in an error state. In that case, the ack is * is in an error state. In that case, the ack is
+ * the ack interrupt handler, not here. Handle ac * the ack interrupt handler, not here. Handle ac
+ * split transfers. Start splits halt on ACK. * split transfers. Start splits halt on ACK.
+ */ */
+ handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, mus handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, mus
+ } else { } else {
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) _hc->ep_type == DWC_OTG_EP_TYPE_ISOC)
+ /* /*
+ * A periodic transfer halted with no oth * A periodic transfer halted with no oth
+ * interrupts set. Assume it was halted b * interrupts set. Assume it was halted b
+ * because it could not be completed in i * because it could not be completed in i
+ * (micro)frame. * (micro)frame.
+ */ */
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+ DWC_PRINT("%s: Halt channel %d (assume in DWC_PRINT("%s: Halt channel %d (assume in
+ __func__, _hc->hc_num); __func__, _hc->hc_num);
+
+#endif /* */ #endif /* */
+ halt_channel(_hcd, _hc, _qtd, halt_channel(_hcd, _hc, _qtd,
+ DWC_OTG_HC_XFER_PERI DWC_OTG_HC_XFER_PERI
+ } else { } else {
+#ifdef CONFIG_DWC_DEBUG <
+ DWC_ERROR("%s: Channel %d, DMA Mode - DWC_ERROR("%s: Channel %d, DMA Mode -
+ "for halting is unknown, nyet %d "for halting is unknown, nyet %d
+ __func__, _hc->hc_num, hcint.b.n __func__, _hc->hc_num, hcint.b.n
+ dwc_read_reg32(&_hcd->core_i dwc_read_reg32(&_hcd->core_i
+#endif <
+ halt_channel(_hcd, _hc, _qtd, _hc->ha <
+ } }
+ } }
+} }
+
+/** /**
+ * Handles a host channel Channel Halted interrupt. * Handles a host channel Channel Halted interrupt.
+ * *
+ * In slave mode, this handler is called only when the driver * In slave mode, this handler is called only when the driver
+ * requests a halt. This occurs during handling other host ch * requests a halt. This occurs during handling other host ch
+ * (e.g. nak, xacterr, stall, nyet, etc.). * (e.g. nak, xacterr, stall, nyet, etc.).
+ * *
+ * In DMA mode, this is the interrupt that occurs when the co * In DMA mode, this is the interrupt that occurs when the co
+ * processing a transfer on a channel. Other host channel int * processing a transfer on a channel. Other host channel int
+ * ahberr) are disabled in DMA mode. * ahberr) are disabled in DMA mode.
+ */ */
+static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t * _hcd, static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg
+{ {
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Channel Halted--\n", _hc->hc_num); "Channel Halted--\n", _hc->hc_num);
+ if (_hcd->core_if->dma_enable) { if (_hcd->core_if->dma_enable) {
+ handle_hc_chhltd_intr_dma(_hcd, _hc, _hc_regs handle_hc_chhltd_intr_dma(_hcd, _hc, _hc_regs
+ } else { } else {
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+ if (!halt_status_ok(_hcd, _hc, _hc_regs, _qtd, mu | if (!halt_status_ok(_hcd, _hc, _hc_regs, _qtd)) {
+ return 1; return 1;
+ } }
+#endif /* */ #endif /* */
+ release_channel(_hcd, _hc, _qtd, _hc->halt_status release_channel(_hcd, _hc, _qtd, _hc->halt_status
+ } }
+ > clear_hc_int(_hc_regs, chhltd);
+ return 1; return 1;
+} }
+
+/** Handles interrupt for a specific Host Channel */ /** Handles interrupt for a specific Host Channel */
+int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t * _dwc_otg int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t * _dwc_otg
+{ {
+ int must_free = 0; int must_free = 0;
+ int retval = 0; int retval = 0;
+ hcint_data_t hcint; hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk; hcintmsk_data_t hcintmsk;
+ dwc_hc_t * hc; dwc_hc_t * hc;
+ dwc_otg_hc_regs_t * hc_regs; dwc_otg_hc_regs_t * hc_regs;
+ dwc_otg_qtd_t * qtd; dwc_otg_qtd_t * qtd;
+ DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Ch DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Ch
+ hc = _dwc_otg_hcd->hc_ptr_array[_num]; hc = _dwc_otg_hcd->hc_ptr_array[_num];
+ hc_regs = _dwc_otg_hcd->core_if->host_if->hc_regs[_nu hc_regs = _dwc_otg_hcd->core_if->host_if->hc_regs[_nu
+ qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
+ DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x
+ hcint.d32, hcintmsk.d32, (hcint.d32 & hc hcint.d32, hcintmsk.d32, (hcint.d32 & hc
+ hcint.d32 = hcint.d32 & hcintmsk.d32; hcint.d32 = hcint.d32 & hcintmsk.d32;
+ if (!_dwc_otg_hcd->core_if->dma_enable) { if (!_dwc_otg_hcd->core_if->dma_enable) {
+ if ((hcint.b.chhltd) && (hcint.d32 != 0x2)) { if ((hcint.b.chhltd) && (hcint.d32 != 0x2)) {
+ hcint.b.chhltd = 0; hcint.b.chhltd = 0;
+ } }
+ } }
+ if (hcint.b.xfercomp) { if (hcint.b.xfercomp) {
+ retval |= handle_hc_xfercomp_intr(_dwc_otg_hc retval |= handle_hc_xfercomp_intr(_dwc_otg_hc
+ /* /*
+ * If NYET occurred at same time as Xfer Complete * If NYET occurred at same time as Xfer Complete
+ * handled by the Xfer Complete interrupt handler * handled by the Xfer Complete interrupt handler
+ * to call the NYET interrupt handler in this cas * to call the NYET interrupt handler in this cas
+ */ */
+ hcint.b.nyet = 0; hcint.b.nyet = 0;
+ } }
+ if (hcint.b.chhltd) { if (hcint.b.chhltd) {
+ retval |= handle_hc_chhltd_intr(_dwc_otg_hcd, retval |= handle_hc_chhltd_intr(_dwc_otg_hcd,
+ } }
+ if (hcint.b.ahberr) { if (hcint.b.ahberr) {
+ retval |= handle_hc_ahberr_intr(_dwc_otg_hcd, retval |= handle_hc_ahberr_intr(_dwc_otg_hcd,
+ } }
+ if (hcint.b.stall) { if (hcint.b.stall) {
+ retval |= handle_hc_stall_intr(_dwc_otg_hcd, retval |= handle_hc_stall_intr(_dwc_otg_hcd,
+ } }
+ if (hcint.b.nak) { if (hcint.b.nak) {
+ retval |= handle_hc_nak_intr(_dwc_otg_hcd, hc retval |= handle_hc_nak_intr(_dwc_otg_hcd, hc
+ } }
+ if (hcint.b.ack) { if (hcint.b.ack) {
+ retval |= handle_hc_ack_intr(_dwc_otg_hcd, hc retval |= handle_hc_ack_intr(_dwc_otg_hcd, hc
+ } }
+ if (hcint.b.nyet) { if (hcint.b.nyet) {
+ retval |= handle_hc_nyet_intr(_dwc_otg_hcd, h retval |= handle_hc_nyet_intr(_dwc_otg_hcd, h
+ } }
+ if (hcint.b.xacterr) { if (hcint.b.xacterr) {
+ retval |= handle_hc_xacterr_intr(_dwc_otg_hcd retval |= handle_hc_xacterr_intr(_dwc_otg_hcd
+ } }
+ if (hcint.b.bblerr) { if (hcint.b.bblerr) {
+ retval |= handle_hc_babble_intr(_dwc_otg_hcd, retval |= handle_hc_babble_intr(_dwc_otg_hcd,
+ } }
+ if (hcint.b.frmovrun) { if (hcint.b.frmovrun) {
+ retval |= handle_hc_frmovrun_intr(_dwc_otg_hc retval |= handle_hc_frmovrun_intr(_dwc_otg_hc
+ } }
+ if (hcint.b.datatglerr) { if (hcint.b.datatglerr) {
+ retval |= handle_hc_datatglerr_intr(_dwc_otg_ retval |= handle_hc_datatglerr_intr(_dwc_otg_
+ } }
+ /* /*
+ * Logic to free the qtd here, at the end of the hc i * Logic to free the qtd here, at the end of the hc i
+ * processing, if the handling of this interrupt dete * processing, if the handling of this interrupt dete
+ * that it needs to be freed. * that it needs to be freed.
+ */ */
+ if (must_free) { if (must_free) {
+ /* Free the qtd here now that we are done usi /* Free the qtd here now that we are done usi
+ dwc_otg_hcd_qtd_free(qtd); dwc_otg_hcd_qtd_free(qtd);
+ } }
+ return retval; return retval;
+} }
+
+#endif /* DWC_DEVICE_ONLY */ #endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd_queue.c.sdiff b/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd_queue.c.sdiff
new file mode 100644
index 00000000000..07cd43261b6
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/sdiff/dwc_otg_hcd_queue.c.sdiff
@@ -0,0 +1,821 @@
+/* ========================================================== /* ==========================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers
+ * $Revision: #4 $ * $Revision: #4 $
+ * $Date: 2005/09/15 $ * $Date: 2005/09/15 $
+ * $Change: 537387 $ * $Change: 537387 $
+ * *
+ * Synopsys HS OTG Linux Software Driver and documentation (h * Synopsys HS OTG Linux Software Driver and documentation (h
+ * "Software") is an Unsupported proprietary work of Synopsys * "Software") is an Unsupported proprietary work of Synopsys
+ * otherwise expressly agreed to in writing between Synopsys * otherwise expressly agreed to in writing between Synopsys
+ * *
+ * The Software IS NOT an item of Licensed Software or Licens * The Software IS NOT an item of Licensed Software or Licens
+ * any End User Software License Agreement or Agreement for L * any End User Software License Agreement or Agreement for L
+ * with Synopsys or any supplement thereto. You are permitted * with Synopsys or any supplement thereto. You are permitted
+ * redistribute this Software in source and binary forms, wit * redistribute this Software in source and binary forms, wit
+ * modification, provided that redistributions of source code * modification, provided that redistributions of source code
+ * notice. You may not view, use, disclose, copy or distribut * notice. You may not view, use, disclose, copy or distribut
+ * any information contained herein except pursuant to this l * any information contained herein except pursuant to this l
+ * Synopsys. If you do not agree with this notice, including * Synopsys. If you do not agree with this notice, including
+ * below, then you are not authorized to use the Software. * below, then you are not authorized to use the Software.
+ * *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON A * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON A
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PA * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PA
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABL * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABL
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTER * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARI * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARI
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE PO * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE PO
+ * DAMAGE. * DAMAGE.
+ * ========================================================== * ==========================================================
+
+#ifndef CONFIG_DWC_DEVICE_ONLY #ifndef CONFIG_DWC_DEVICE_ONLY
+
+/** /**
+ * @file * @file
+ * *
+ * This file contains the functions to manage Queue Heads and * This file contains the functions to manage Queue Heads and
+ * Transfer Descriptors. * Transfer Descriptors.
+ */ */
+#include <linux/kernel.h> #include <linux/kernel.h>
+#include <linux/module.h> #include <linux/module.h>
+#include <linux/moduleparam.h> #include <linux/moduleparam.h>
+#include <linux/init.h> #include <linux/init.h>
+#include <linux/device.h> #include <linux/device.h>
+#include <linux/errno.h> #include <linux/errno.h>
+#include <linux/list.h> #include <linux/list.h>
+#include <linux/interrupt.h> #include <linux/interrupt.h>
+#include <linux/string.h> #include <linux/string.h>
+
+#include "dwc_otg_driver.h" #include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h" #include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h" #include "dwc_otg_regs.h"
+
+/** /**
+ * This function allocates and initializes a QH. * This function allocates and initializes a QH.
+ * *
+ * @param _hcd The HCD state structure for the DWC OTG contro * @param _hcd The HCD state structure for the DWC OTG contro
+ * @param[in] _urb Holds the information about the device/end * @param[in] _urb Holds the information about the device/end
+ * to initialize the QH. * to initialize the QH.
+ * *
+ * @return Returns pointer to the newly allocated QH, or NULL * @return Returns pointer to the newly allocated QH, or NULL
+dwc_otg_qh_t * dwc_otg_hcd_qh_create(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * dwc_otg_hcd_qh_create(dwc_otg_hcd_t * _hcd,
+ struct urb * _urb) struct urb * _urb)
+{ {
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+
+ /* Allocate memory */ /* Allocate memory */
+ /** @todo add memflags argument */ /** @todo add memflags argument */
+ qh = dwc_otg_hcd_qh_alloc(); qh = dwc_otg_hcd_qh_alloc();
+ if (qh == NULL) { if (qh == NULL) {
+ return NULL; return NULL;
+ } }
+ dwc_otg_hcd_qh_init(_hcd, qh, _urb); dwc_otg_hcd_qh_init(_hcd, qh, _urb);
+ return qh; return qh;
+} }
+
+/** Free each QTD in the QH's QTD-list then free the QH. QH /** Free each QTD in the QH's QTD-list then free the QH. QH
+ * removed from a list. QTD list should already be empty if * removed from a list. QTD list should already be empty if
+ * Dequeue. * Dequeue.
+ * *
+ * @param[in] _qh The QH to free. * @param[in] _qh The QH to free.
+ */ */
+void dwc_otg_hcd_qh_free(dwc_otg_qh_t * _qh) void dwc_otg_hcd_qh_free(dwc_otg_qh_t * _qh)
+{ {
+ dwc_otg_qtd_t * qtd; dwc_otg_qtd_t * qtd;
+ struct list_head *pos; struct list_head *pos;
+ unsigned long flags; unsigned long flags;
+
+ /* Free each QTD in the QTD list */ /* Free each QTD in the QTD list */
+ local_irq_save(flags); local_irq_save(flags);
+ for (pos = _qh->qtd_list.next; pos != &_qh->qtd_list; for (pos = _qh->qtd_list.next; pos != &_qh->qtd_list;
+ pos = _qh->qtd_list.next) { pos = _qh->qtd_list.next) {
+ list_del(pos); list_del(pos);
+ qtd = dwc_list_to_qtd(pos); qtd = dwc_list_to_qtd(pos);
+ dwc_otg_hcd_qtd_free(qtd); dwc_otg_hcd_qtd_free(qtd);
+ } }
+ local_irq_restore(flags); local_irq_restore(flags);
+ kfree(_qh); kfree(_qh);
+ return; return;
+} }
+
+/** Initializes a QH structure. /** Initializes a QH structure.
+ * *
+ * @param[in] _hcd The HCD state structure for the DWC OTG co * @param[in] _hcd The HCD state structure for the DWC OTG co
+ * @param[in] _qh The QH to init. * @param[in] _qh The QH to init.
+ * @param[in] _urb Holds the information about the device/end * @param[in] _urb Holds the information about the device/end
+ * to initialize the QH. */ * to initialize the QH. */
+#define SCHEDULE_SLOP 10 #define SCHEDULE_SLOP 10
+void dwc_otg_hcd_qh_init(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * void dwc_otg_hcd_qh_init(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t *
+ struct urb *_urb) struct urb *_urb)
+{ {
+ memset(_qh, 0, sizeof(dwc_otg_qh_t)); memset(_qh, 0, sizeof(dwc_otg_qh_t));
+
+ /* Initialize QH */ /* Initialize QH */
+ switch (usb_pipetype(_urb->pipe)) { switch (usb_pipetype(_urb->pipe)) {
+ case PIPE_CONTROL: case PIPE_CONTROL:
+ _qh->ep_type = USB_ENDPOINT_XFER_CONTROL; _qh->ep_type = USB_ENDPOINT_XFER_CONTROL;
+ break; break;
+ case PIPE_BULK: case PIPE_BULK:
+ _qh->ep_type = USB_ENDPOINT_XFER_BULK; _qh->ep_type = USB_ENDPOINT_XFER_BULK;
+ break; break;
+ case PIPE_ISOCHRONOUS: case PIPE_ISOCHRONOUS:
+ _qh->ep_type = USB_ENDPOINT_XFER_ISOC; _qh->ep_type = USB_ENDPOINT_XFER_ISOC;
+ break; break;
+ case PIPE_INTERRUPT: case PIPE_INTERRUPT:
+ _qh->ep_type = USB_ENDPOINT_XFER_INT; _qh->ep_type = USB_ENDPOINT_XFER_INT;
+ break; break;
+ } }
+ _qh->ep_is_in = usb_pipein(_urb->pipe) ? 1 : 0; _qh->ep_is_in = usb_pipein(_urb->pipe) ? 1 : 0;
+ _qh->data_toggle = DWC_OTG_HC_PID_DATA0; _qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+ _qh->maxp = usb_maxpacket(_urb->dev, _urb->pipe, !(us _qh->maxp = usb_maxpacket(_urb->dev, _urb->pipe, !(us
+ INIT_LIST_HEAD(&_qh->qtd_list); INIT_LIST_HEAD(&_qh->qtd_list);
+ INIT_LIST_HEAD(&_qh->qh_list_entry); INIT_LIST_HEAD(&_qh->qh_list_entry);
+ _qh->channel = NULL; _qh->channel = NULL;
+
+ /* FS/LS Enpoint on HS Hub /* FS/LS Enpoint on HS Hub
+ * NOT virtual root hub */ * NOT virtual root hub */
+ _qh->do_split = 0; _qh->do_split = 0;
+ _qh->speed = _urb->dev->speed; <
+ <
+ if (((_urb->dev->speed == USB_SPEED_LOW) || if (((_urb->dev->speed == USB_SPEED_LOW) ||
+ (_urb->dev->speed == USB_SPEED_FULL)) && (_urb->dev->speed == USB_SPEED_FULL)) &&
+ (_urb->dev->tt) && (_urb->dev->tt->hub) && (_ (_urb->dev->tt) && (_urb->dev->tt->hub) && (_
+ DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT foun DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT foun
+ usb_pipeendpoint(_urb->pipe), _urb->dev- usb_pipeendpoint(_urb->pipe), _urb->dev-
+ _qh->do_split = 1; _qh->do_split = 1;
+ } }
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT if (_qh->ep_type == USB_ENDPOINT_XFER_INT
+ || _qh->ep_type == USB_ENDPOINT_XFER_ISOC) { || _qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
+
+ /* Compute scheduling parameters once and sav /* Compute scheduling parameters once and sav
+ hprt0_data_t hprt; hprt0_data_t hprt;
+
+ /** @todo Account for split transfers in the /** @todo Account for split transfers in the
+ int bytecount = dwc_hb_mult(_qh->maxp) * dwc_ int bytecount = dwc_hb_mult(_qh->maxp) * dwc_
+ _qh->usecs = NS_TO_US(usb_calc_bus_time(_urb- _qh->usecs = NS_TO_US(usb_calc_bus_time(_urb-
+ usb_p usb_p
+ (_qh->ep_type == USB_ (_qh->ep_type == USB_
+
+ /* Start in a slightly future (micro)frame. * /* Start in a slightly future (micro)frame. *
+ _qh->sched_frame = dwc_frame_num_inc(_hcd->fr _qh->sched_frame = dwc_frame_num_inc(_hcd->fr
+ _qh->interval = _urb->interval; _qh->interval = _urb->interval;
+
+#if 0 #if 0
+ /* Increase interrupt polling rate for de /* Increase interrupt polling rate for de
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT if (_qh->ep_type == USB_ENDPOINT_XFER_INT
+ _qh->interval = 8; _qh->interval = 8;
+ } }
+
+#endif /* */ #endif /* */
+ hprt.d32 = dwc_read_reg32(_hcd->core_if->host_if- hprt.d32 = dwc_read_reg32(_hcd->core_if->host_if-
+ if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_S if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_S
+ ((_urb->dev->speed == USB_SPEED_LOW) ((_urb->dev->speed == USB_SPEED_LOW)
+ (_urb->dev->speed == USB_SPEED_FULL)) (_urb->dev->speed == USB_SPEED_FULL))
+ _qh->interval *= 8; _qh->interval *= 8;
+ _qh->sched_frame |= 0x7; _qh->sched_frame |= 0x7;
+ _qh->start_split_frame = _qh->sched_f _qh->start_split_frame = _qh->sched_f
+ } }
+ } }
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n"); DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n");
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n",
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Addre DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Addre
+ _urb->dev->devnum); _urb->dev->devnum);
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d,
+ usb_pipeendpoint(_urb->pipe), usb_pipeendpoint(_urb->pipe),
+ usb_pipein(_urb->pipe) == USB_DIR_IN ? "I usb_pipein(_urb->pipe) == USB_DIR_IN ? "I
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n
+ char *speed; char *speed;
+ switch(_urb->dev->speed) { switch(_urb->dev->speed) {
+ case USB_SPEED_LOW: case USB_SPEED_LOW:
+ speed = "low"; break; speed = "low"; break;
+ case USB_SPEED_FULL: case USB_SPEED_FULL:
+ speed = "full"; break; speed = "full"; break;
+ case USB_SPEED_HIGH: case USB_SPEED_HIGH:
+ speed = "high"; break; speed = "high"; break;
+ default: default:
+ speed = "?"; speed = "?";
+ break; break;
+ }; };
+ speed; speed;
+ } )) ; } )) ;
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n" DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n"
+ char *type; char *type;
+ switch (_qh->ep_type) { switch (_qh->ep_type) {
+ case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_ISOC:
+ type = "isochronous"; break; type = "isochronous"; break;
+ case USB_ENDPOINT_XFER_INT: case USB_ENDPOINT_XFER_INT:
+ type = "interrupt"; break; type = "interrupt"; break;
+ case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_CONTROL:
+ type = "control"; break; type = "control"; break;
+ case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_BULK:
+ type = "bulk"; break; type = "bulk"; break;
+ default: default:
+ type = "?";break; type = "?";break;
+ }; };
+ type; type;
+ } )) ; } )) ;
+
+#ifdef CONFIG_DWC_DEBUG #ifdef CONFIG_DWC_DEBUG
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) { if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs
+ _qh->usecs); _qh->usecs);
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - inter DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - inter
+ _qh->interval); _qh->interval);
+ } }
+
+#endif /* */ #endif /* */
+ return; return;
+} }
+
+/** /**
+ * Microframe scheduler | * Checks that a channel is available for a periodic transfer
+ * track the total use in hcd->frame_usecs | *
+ * keep each qh use in qh->frame_usecs | * @return 0 if successful, negative error code otherise.
+ * when surrendering the qh then donate the time back <
+ */ <
+const unsigned short max_uframe_usecs[]={ 100, 100, 100, 100, <
+ <
+/* <
+ * called from dwc_otg_hcd.c:dwc_otg_hcd_init <
+ */ */
+int init_hcd_usecs(dwc_otg_hcd_t *_hcd) | static int periodic_channel_available(dwc_otg_hcd_t * _hcd)
+{ <
+ int i; <
+ for (i=0; i<8; i++) { <
+ _hcd->frame_usecs[i] = max_uframe_usecs[i]; <
+ } <
+ return 0; <
+} <
+ <
+static int find_single_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_q <
+{ {
+ int i; | /*
+ unsigned short utime; | * Currently assuming that there is a dedicated host
+ int t_left; | * periodic transaction plus at least one host channe
+ int ret; | * non-periodic transactions.
+ int done; | */
+ | int status;
+ ret = -1; | int num_channels;
+ utime = _qh->usecs; | num_channels = _hcd->core_if->core_params->host_chann
+ t_left = utime; | if ((_hcd->periodic_channels + _hcd->non_periodic_cha
+ i = 0; | num_channels) && (_hcd->periodic_channels < num
+ done = 0; | status = 0;
+ while (done == 0) { | } else {
+ /* At the start _hcd->frame_usecs[i] = max_uf | DWC_NOTICE("%s: Total channels: %d, Periodic:
+ if (utime <= _hcd->frame_usecs[i]) { | __func__, num_channels, _hcd->periodic_c
+ _hcd->frame_usecs[i] -= utime; | _hcd->non_periodic_channels);
+ _qh->frame_usecs[i] += utime; | status = -ENOSPC;
+ t_left -= utime; <
+ ret = i; <
+ done = 1; <
+ return ret; <
+ } else { <
+ i++; <
+ if (i == 8) { <
+ done = 1; <
+ ret = -1; <
+ } <
+ } <
+ } }
+ return ret; | return status;
+} }
+
+/* | /**
+ * use this for FS apps that can span multiple uframes | * Checks that there is sufficient bandwidth for the specifie
+ > * periodic schedule. For simplicity, this calculation assume
+ > * transfers in the periodic schedule may occur in the same (
+ > *
+ > * @param _hcd The HCD state structure for the DWC OTG contro
+ > * @param _qh QH containing periodic bandwidth required.
+ > *
+ > * @return 0 if successful, negative error code otherwise.
+ */ */
+static int find_multi_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh | static int check_periodic_bandwidth(dwc_otg_hcd_t * _hcd, dwc
+{ {
+ int i; | int status;
+ int j; | uint16_t max_claimed_usecs;
+ unsigned short utime; | status = 0;
+ int t_left; | if (_hcd->core_if->core_params->speed == DWC_SPEED_PA
+ int ret; <
+ int done; <
+ unsigned short xtime; <
+ <
+ ret = -1; <
+ utime = _qh->usecs; <
+ t_left = utime; <
+ i = 0; <
+ done = 0; <
+loop: <
+ while (done == 0) { <
+ if(_hcd->frame_usecs[i] <= 0) { <
+ i++; <
+ if (i == 8) { <
+ done = 1; <
+ ret = -1; <
+ } <
+ goto loop; <
+ } <
+ <
+ /* /*
+ * we need n consequtive slots | * High speed mode.
+ * so use j as a start slot j plus j+1 must b | * Max periodic usecs is 80% x 125 usec = 100
+ */ */
+ xtime= _hcd->frame_usecs[i]; | max_claimed_usecs = 100 - _qh->usecs;
+ for (j = i+1 ; j < 8 ; j++ ) { <
+ /* <
+ * if we add this frame remaining tim <
+ * be OK, if not we need to test j fo <
+ */ <
+ if ((xtime+_hcd->frame_usecs[j]) < ut <
+ if (_hcd->frame_usecs[j] < ma <
+ j = 8; <
+ ret = -1; <
+ continue; <
+ } <
+ } <
+ if (xtime >= utime) { <
+ ret = i; <
+ j = 8; /* stop loop with a g <
+ continue; <
+ } <
+ /* add the frame time to x time */ <
+ xtime += _hcd->frame_usecs[j]; <
+ /* we must have a fully available nex <
+ if ((xtime < utime) <
+ && (_hcd->frame_usecs[j] == max_u <
+ ret = -1; <
+ j = 8; /* stop loop with a b <
+ continue; <
+ } <
+ } <
+ if (ret >= 0) { <
+ t_left = utime; <
+ for (j = i; (t_left>0) && (j < 8); j+ <
+ t_left -= _hcd->frame_usecs[j <
+ if ( t_left <= 0 ) { <
+ _qh->frame_usecs[j] + <
+ _hcd->frame_usecs[j]= <
+ ret = i; <
+ done = 1; <
+ } else { <
+ _qh->frame_usecs[j] + <
+ _hcd->frame_usecs[j] <
+ } <
+ } <
+ } else { <
+ i++; <
+ if (i == 8) { <
+ done = 1; <
+ ret = -1; <
+ } <
+ } <
+ } <
+ return ret; <
+} <
+ <
+static int find_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _ <
+{ <
+ int ret; <
+ ret = -1; <
+ <
+ if (_qh->speed == USB_SPEED_HIGH) { <
+ /* if this is a hs transaction we need a full <
+ ret = find_single_uframe(_hcd, _qh); <
+ } else { } else {
+ /* if this is a fs transaction we may need a | /*
+ ret = find_multi_uframe(_hcd, _qh); | * Full speed mode.
+ > * Max periodic usecs is 90% x 1000 usec = 90
+ > */
+ > max_claimed_usecs = 900 - _qh->usecs;
+ } }
+ return ret; | if (_hcd->periodic_usecs > max_claimed_usecs) {
+ > #undef USB_DWC_OTG_IGNORE_BANDWIDTH
+ > #ifndef USB_DWC_OTG_IGNORE_BANDWIDTH
+ > DWC_NOTICE("%s: already claimed usecs %d, req
+ > __func__, _hcd->periodic_usecs, _
+ > status = -ENOSPC;
+ > #else
+ > status = 0;
+ > #endif
+ > }
+ > return status;
+} }
+
+/** /**
+ * Checks that the max transfer size allowed in a host channe * Checks that the max transfer size allowed in a host channe
+ * to handle the maximum data transfer in a single (micro)fra * to handle the maximum data transfer in a single (micro)fra
+ * transfer. * transfer.
+ * *
+ * @param _hcd The HCD state structure for the DWC OTG contro * @param _hcd The HCD state structure for the DWC OTG contro
+ * @param _qh QH for a periodic endpoint. * @param _qh QH for a periodic endpoint.
+ * *
+ * @return 0 if successful, negative error code otherwise. * @return 0 if successful, negative error code otherwise.
+ */ */
+static int check_max_xfer_size(dwc_otg_hcd_t * _hcd, dwc_otg_ static int check_max_xfer_size(dwc_otg_hcd_t * _hcd, dwc_otg_
+{ {
+ int status; int status;
+ uint32_t max_xfer_size; uint32_t max_xfer_size;
+ uint32_t max_channel_xfer_size; uint32_t max_channel_xfer_size;
+ status = 0; status = 0;
+ max_xfer_size = dwc_max_packet(_qh->maxp) * dwc_hb_mu max_xfer_size = dwc_max_packet(_qh->maxp) * dwc_hb_mu
+ max_channel_xfer_size = _hcd->core_if->core_params->m max_channel_xfer_size = _hcd->core_if->core_params->m
+ if (max_xfer_size > max_channel_xfer_size) { if (max_xfer_size > max_channel_xfer_size) {
+ DWC_NOTICE("%s: Periodic xfer length %d > " DWC_NOTICE("%s: Periodic xfer length %d > "
+ "max xfer length for channel %d\n "max xfer length for channel %d\n
+ max_xfer_size, max_channel_xfer_s max_xfer_size, max_channel_xfer_s
+ status = -ENOSPC; status = -ENOSPC;
+ } }
+ return status; return status;
+} }
+
+/** /**
+ * Schedules an interrupt or isochronous transfer in the peri * Schedules an interrupt or isochronous transfer in the peri
+ * *
+ * @param _hcd The HCD state structure for the DWC OTG contro * @param _hcd The HCD state structure for the DWC OTG contro
+ * @param _qh QH for the periodic transfer. The QH should alr * @param _qh QH for the periodic transfer. The QH should alr
+ * scheduling information. * scheduling information.
+ * *
+ * @return 0 if successful, negative error code otherwise. * @return 0 if successful, negative error code otherwise.
+ */ */
+static int schedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg_qh static int schedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg_qh
+{ {
+ int status = 0; int status = 0;
+ int frame; | status = periodic_channel_available(_hcd);
+ status = find_uframe(_hcd, _qh); | if (status) {
+ frame = -1; | DWC_NOTICE("%s: No host channel available for
+ if (status == 0) { | "transfer.\n", __func__);
+ frame = 7; | return status;
+ } else { <
+ if (status > 0 ) <
+ frame = status-1; <
+ } <
+ /* Set the new frame up */ <
+ if (frame > -1) { <
+ _qh->sched_frame &= ~0x7; <
+ _qh->sched_frame |= (frame & 7); <
+ } }
+ | status = check_periodic_bandwidth(_hcd, _qh);
+ if (status != -1 ) <
+ status = 0; <
+ <
+ if (status) { if (status) {
+ DWC_NOTICE("%s: Insufficient periodic bandwid DWC_NOTICE("%s: Insufficient periodic bandwid
+ "periodic transfer.\n", __func__) "periodic transfer.\n", __func__)
+ return status; return status;
+ } }
+ status = check_max_xfer_size(_hcd, _qh); status = check_max_xfer_size(_hcd, _qh);
+ if (status) { if (status) {
+ DWC_NOTICE("%s: Channel max transfer size too DWC_NOTICE("%s: Channel max transfer size too
+ "for periodic transfer.\n", __fun "for periodic transfer.\n", __fun
+ return status; return status;
+ } }
+
+ /* Always start in the inactive schedule. */ /* Always start in the inactive schedule. */
+ list_add_tail(&_qh->qh_list_entry, &_hcd->periodic_sc list_add_tail(&_qh->qh_list_entry, &_hcd->periodic_sc
+
+ > /* Reserve the periodic channel. */
+ > _hcd->periodic_channels++;
+ >
+ /* Update claimed usecs per (micro)frame. */ /* Update claimed usecs per (micro)frame. */
+ _hcd->periodic_usecs += _qh->usecs; _hcd->periodic_usecs += _qh->usecs;
+
+ /* Update average periodic bandwidth claimed and # pe /* Update average periodic bandwidth claimed and # pe
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_alloc hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_alloc
+ _qh->usecs / _qh->usecs /
+
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) { if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwid hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwid
+ DWC_DEBUGPL(DBG_HCD, DWC_DEBUGPL(DBG_HCD,
+ "Scheduled intr: qh %p, usecs %d "Scheduled intr: qh %p, usecs %d
+ _qh, _qh->usecs, _qh->interval); _qh, _qh->usecs, _qh->interval);
+ } else { } else {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwid hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwid
+ DWC_DEBUGPL(DBG_HCD, DWC_DEBUGPL(DBG_HCD,
+ "Scheduled isoc: qh %p, usecs %d "Scheduled isoc: qh %p, usecs %d
+ _qh, _qh->usecs, _qh->interval); _qh, _qh->usecs, _qh->interval);
+ } }
+ return status; return status;
+} }
+
+/** /**
+ * This function adds a QH to either the non periodic or peri * This function adds a QH to either the non periodic or peri
+ * it is not already in the schedule. If the QH is already in * it is not already in the schedule. If the QH is already in
+ * action is taken. * action is taken.
+ * *
+ * @return 0 if successful, negative error code otherwise. * @return 0 if successful, negative error code otherwise.
+ */ */
+int dwc_otg_hcd_qh_add(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _ int dwc_otg_hcd_qh_add(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _
+{ {
+ unsigned long flags; unsigned long flags;
+ int status = 0; int status = 0;
+ local_irq_save(flags); local_irq_save(flags);
+ if (!list_empty(&_qh->qh_list_entry)) { if (!list_empty(&_qh->qh_list_entry)) {
+ /* QH already in a schedule. */ /* QH already in a schedule. */
+ goto done; goto done;
+ } }
+
+ /* Add the new QH to the appropriate schedule */ /* Add the new QH to the appropriate schedule */
+ if (dwc_qh_is_non_per(_qh)) { if (dwc_qh_is_non_per(_qh)) {
+ /* Always start in the inactive schedule. */ /* Always start in the inactive schedule. */
+ list_add_tail(&_qh->qh_list_entry, list_add_tail(&_qh->qh_list_entry,
+ &_hcd->non_periodic_sched_i &_hcd->non_periodic_sched_i
+ } else { } else {
+ status = schedule_periodic(_hcd, _qh); status = schedule_periodic(_hcd, _qh);
+ } }
+
+done:local_irq_restore(flags); done:local_irq_restore(flags);
+ return status; return status;
+} }
+/** /**
+ * This function adds a QH to the non periodic deferred sched * This function adds a QH to the non periodic deferred sched
+ * *
+ * @return 0 if successful, negative error code otherwise. * @return 0 if successful, negative error code otherwise.
+ */ */
+int dwc_otg_hcd_qh_add_deferred(dwc_otg_hcd_t * _hcd, dwc_otg int dwc_otg_hcd_qh_add_deferred(dwc_otg_hcd_t * _hcd, dwc_otg
+{ {
+ unsigned long flags; unsigned long flags;
+ local_irq_save(flags); local_irq_save(flags);
+ if (!list_empty(&_qh->qh_list_entry)) { if (!list_empty(&_qh->qh_list_entry)) {
+ /* QH already in a schedule. */ /* QH already in a schedule. */
+ goto done; goto done;
+ } }
+
+ /* Add the new QH to the non periodic deferred schedu /* Add the new QH to the non periodic deferred schedu
+ if (dwc_qh_is_non_per(_qh)) { if (dwc_qh_is_non_per(_qh)) {
+ list_add_tail(&_qh->qh_list_entry, list_add_tail(&_qh->qh_list_entry,
+ &_hcd->non_periodic_sched_defer &_hcd->non_periodic_sched_defer
+ } }
+done: done:
+ local_irq_restore(flags); local_irq_restore(flags);
+ return 0; return 0;
+} }
+
+/** /**
+ * Removes an interrupt or isochronous transfer from the peri * Removes an interrupt or isochronous transfer from the peri
+ * *
+ * @param _hcd The HCD state structure for the DWC OTG contro * @param _hcd The HCD state structure for the DWC OTG contro
+ * @param _qh QH for the periodic transfer. * @param _qh QH for the periodic transfer.
+ */ */
+static void deschedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg static void deschedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg
+{ {
+ int i; <
+ list_del_init(&_qh->qh_list_entry); list_del_init(&_qh->qh_list_entry);
+
+ > /* Release the periodic channel reservation. */
+ > _hcd->periodic_channels--;
+ >
+ /* Update claimed usecs per (micro)frame. */ /* Update claimed usecs per (micro)frame. */
+ _hcd->periodic_usecs -= _qh->usecs; _hcd->periodic_usecs -= _qh->usecs;
+
+ for (i = 0; i < 8; i++) { <
+ _hcd->frame_usecs[i] += _qh->frame_usecs[i]; <
+ _qh->frame_usecs[i] = 0; <
+ } <
+ <
+ /* Update average periodic bandwidth claimed and # pe /* Update average periodic bandwidth claimed and # pe
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_alloc hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_alloc
+ _qh->usecs / _qh->int _qh->usecs / _qh->int
+
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) { if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwid hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwid
+ DWC_DEBUGPL(DBG_HCD, DWC_DEBUGPL(DBG_HCD,
+ "Descheduled intr: qh %p, usecs "Descheduled intr: qh %p, usecs
+ _qh, _qh->usecs, _qh->interval); _qh, _qh->usecs, _qh->interval);
+ } else { } else {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwid hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwid
+ DWC_DEBUGPL(DBG_HCD, DWC_DEBUGPL(DBG_HCD,
+ "Descheduled isoc: qh %p, usecs "Descheduled isoc: qh %p, usecs
+ _qh, _qh->usecs, _qh->interval); _qh, _qh->usecs, _qh->interval);
+ } }
+} }
+
+/** /**
+ * Removes a QH from either the non-periodic or periodic sche * Removes a QH from either the non-periodic or periodic sche
+ * not freed. * not freed.
+ * *
+ * @param[in] _hcd The HCD state structure. * @param[in] _hcd The HCD state structure.
+ * @param[in] _qh QH to remove from schedule. */ * @param[in] _qh QH to remove from schedule. */
+void dwc_otg_hcd_qh_remove(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t void dwc_otg_hcd_qh_remove(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t
+{ {
+ unsigned long flags; unsigned long flags;
+ local_irq_save(flags); local_irq_save(flags);
+ if (list_empty(&_qh->qh_list_entry)) { if (list_empty(&_qh->qh_list_entry)) {
+ /* QH is not in a schedule. */ /* QH is not in a schedule. */
+ goto done; goto done;
+ } }
+ if (dwc_qh_is_non_per(_qh)) { if (dwc_qh_is_non_per(_qh)) {
+ if (_hcd->non_periodic_qh_ptr == &_qh->qh_lis if (_hcd->non_periodic_qh_ptr == &_qh->qh_lis
+ _hcd->non_periodic_qh_ptr = _hcd->non _hcd->non_periodic_qh_ptr = _hcd->non
+ } }
+ list_del_init(&_qh->qh_list_entry); list_del_init(&_qh->qh_list_entry);
+ } else { } else {
+ deschedule_periodic(_hcd, _qh); deschedule_periodic(_hcd, _qh);
+ } }
+
+done:local_irq_restore(flags); done:local_irq_restore(flags);
+} }
+
+/** /**
+ * Defers a QH. For non-periodic QHs, removes the QH from the * Defers a QH. For non-periodic QHs, removes the QH from the
+ * non-periodic schedule. The QH is added to the deferred non * non-periodic schedule. The QH is added to the deferred non
+ * schedule if any QTDs are still attached to the QH. * schedule if any QTDs are still attached to the QH.
+ */ */
+int dwc_otg_hcd_qh_deferr(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t int dwc_otg_hcd_qh_deferr(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t
+{ {
+ int deact = 1; int deact = 1;
+ unsigned long flags; unsigned long flags;
+ local_irq_save(flags); local_irq_save(flags);
+ if (dwc_qh_is_non_per(_qh)) { if (dwc_qh_is_non_per(_qh)) {
+ _qh->sched_frame = _qh->sched_frame =
+ dwc_frame_num_inc(_hcd->frame_number, dwc_frame_num_inc(_hcd->frame_number,
+ delay); delay);
+ _qh->channel = NULL; _qh->channel = NULL;
+ _qh->qtd_in_process = NULL; _qh->qtd_in_process = NULL;
+ deact = 0; deact = 0;
+ dwc_otg_hcd_qh_remove(_hcd, _qh); dwc_otg_hcd_qh_remove(_hcd, _qh);
+ if (!list_empty(&_qh->qtd_list)) { if (!list_empty(&_qh->qtd_list)) {
+ /* Add back to deferred non-periodic /* Add back to deferred non-periodic
+ dwc_otg_hcd_qh_add_deferred(_hcd, _qh dwc_otg_hcd_qh_add_deferred(_hcd, _qh
+ } }
+ } }
+ local_irq_restore(flags); local_irq_restore(flags);
+ return deact; return deact;
+} }
+/** /**
+ * Deactivates a QH. For non-periodic QHs, removes the QH fro * Deactivates a QH. For non-periodic QHs, removes the QH fro
+ * non-periodic schedule. The QH is added to the inactive non * non-periodic schedule. The QH is added to the inactive non
+ * schedule if any QTDs are still attached to the QH. * schedule if any QTDs are still attached to the QH.
+ * *
+ * For periodic QHs, the QH is removed from the periodic queu * For periodic QHs, the QH is removed from the periodic queu
+ * there are any QTDs still attached to the QH, the QH is add * there are any QTDs still attached to the QH, the QH is add
+ * periodic inactive schedule or the periodic ready schedule * periodic inactive schedule or the periodic ready schedule
+ * scheduled frame is calculated. The QH is placed in the rea * scheduled frame is calculated. The QH is placed in the rea
+ * the scheduled frame has been reached already. Otherwise it * the scheduled frame has been reached already. Otherwise it
+ * inactive schedule. If there are no QTDs attached to the QH * inactive schedule. If there are no QTDs attached to the QH
+ * completely removed from the periodic schedule. * completely removed from the periodic schedule.
+ */ */
+void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t * _hcd, dwc_otg_ void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t * _hcd, dwc_otg_
+ int sched_next_periodic_split) int sched_next_periodic_split)
+{ {
+ unsigned long flags; unsigned long flags;
+ local_irq_save(flags); local_irq_save(flags);
+ if (dwc_qh_is_non_per(_qh)) { if (dwc_qh_is_non_per(_qh)) {
+ dwc_otg_hcd_qh_remove(_hcd, _qh); dwc_otg_hcd_qh_remove(_hcd, _qh);
+ if (!list_empty(&_qh->qtd_list)) { if (!list_empty(&_qh->qtd_list)) {
+ /* Add back to inactive non-periodic /* Add back to inactive non-periodic
+ dwc_otg_hcd_qh_add(_hcd, _qh); dwc_otg_hcd_qh_add(_hcd, _qh);
+ } }
+ } else { } else {
+ uint16_t frame_number = uint16_t frame_number =
+ dwc_otg_hcd_get_frame_number(dwc_otg_hcd_ dwc_otg_hcd_get_frame_number(dwc_otg_hcd_
+ if (_qh->do_split) { if (_qh->do_split) {
+ /* Schedule the next continuing perio /* Schedule the next continuing perio
+ if (sched_next_periodic_split) { if (sched_next_periodic_split) {
+ _qh->sched_frame = frame_numb _qh->sched_frame = frame_numb
+ if (dwc_frame_num_le(frame_nu if (dwc_frame_num_le(frame_nu
+ dwc_frame_num_inc(_qh dwc_frame_num_inc(_qh
+ /* /*
+ * Allow one frame to * Allow one frame to
+ * split microframe b * split microframe b
+ * complete split, bu * complete split, bu
+ * doing the next sta * doing the next sta
+ * same frame for an * same frame for an
+ */ */
+ if ((_qh->ep_type != if ((_qh->ep_type !=
+ || (_qh->ep_i || (_qh->ep_i
+ _qh->sched_fr _qh->sched_fr
+ } }
+ } }
+ } else { } else {
+ _qh->sched_frame = dwc_frame_ _qh->sched_frame = dwc_frame_
+ _qh->in _qh->in
+ if (dwc_frame_num_le(_qh->sch if (dwc_frame_num_le(_qh->sch
+ _qh->sched_frame = fr _qh->sched_frame = fr
+ } }
+ _qh->sched_frame |= 0x7; _qh->sched_frame |= 0x7;
+ _qh->start_split_frame = _qh- _qh->start_split_frame = _qh-
+ } }
+ } else { } else {
+ _qh->sched_frame = _qh->sched_frame =
+ dwc_frame_num_inc(_qh->sched_fram dwc_frame_num_inc(_qh->sched_fram
+ if (dwc_frame_num_le(_qh->sched_frame if (dwc_frame_num_le(_qh->sched_frame
+ _qh->sched_frame = frame_numb _qh->sched_frame = frame_numb
+ } }
+ } }
+ if (list_empty(&_qh->qtd_list)) { if (list_empty(&_qh->qtd_list)) {
+ dwc_otg_hcd_qh_remove(_hcd, _qh); dwc_otg_hcd_qh_remove(_hcd, _qh);
+ } else { } else {
+ /* /*
+ * Remove from periodic_sched_queued * Remove from periodic_sched_queued
+ * appropriate queue. * appropriate queue.
+ */ */
+ if (dwc_frame_num_le(_qh->sched_frame | if (_qh->sched_frame == frame_number)
+ list_move(&_qh->qh_list_entry list_move(&_qh->qh_list_entry
+ &_hcd->periodic_sc &_hcd->periodic_sc
+ } else { } else {
+ list_move(&_qh->qh_list_entry list_move(&_qh->qh_list_entry
+ &_hcd->periodic_sc &_hcd->periodic_sc
+ } }
+ } }
+ } }
+ local_irq_restore(flags); local_irq_restore(flags);
+} }
+
+/** /**
+ * This function allocates and initializes a QTD. * This function allocates and initializes a QTD.
+ * *
+ * @param[in] _urb The URB to create a QTD from. Each URB-QT * @param[in] _urb The URB to create a QTD from. Each URB-QT
+ * pointing to each other so each pair should have a unique c * pointing to each other so each pair should have a unique c
+ * *
+ * @return Returns pointer to the newly allocated QTD, or NUL * @return Returns pointer to the newly allocated QTD, or NUL
+dwc_otg_qtd_t * dwc_otg_hcd_qtd_create(struct urb *_urb) dwc_otg_qtd_t * dwc_otg_hcd_qtd_create(struct urb *_urb)
+{ {
+ dwc_otg_qtd_t * qtd; dwc_otg_qtd_t * qtd;
+ qtd = dwc_otg_hcd_qtd_alloc(); qtd = dwc_otg_hcd_qtd_alloc();
+ if (qtd == NULL) { if (qtd == NULL) {
+ return NULL; return NULL;
+ } }
+ dwc_otg_hcd_qtd_init(qtd, _urb); dwc_otg_hcd_qtd_init(qtd, _urb);
+ return qtd; return qtd;
+} }
+
+/** /**
+ * Initializes a QTD structure. * Initializes a QTD structure.
+ * *
+ * @param[in] _qtd The QTD to initialize. * @param[in] _qtd The QTD to initialize.
+ * @param[in] _urb The URB to use for initialization. */ * @param[in] _urb The URB to use for initialization. */
+void dwc_otg_hcd_qtd_init(dwc_otg_qtd_t * _qtd, struct urb *_ void dwc_otg_hcd_qtd_init(dwc_otg_qtd_t * _qtd, struct urb *_
+{ {
+ memset(_qtd, 0, sizeof(dwc_otg_qtd_t)); memset(_qtd, 0, sizeof(dwc_otg_qtd_t));
+ _qtd->urb = _urb; _qtd->urb = _urb;
+ if (usb_pipecontrol(_urb->pipe)) { if (usb_pipecontrol(_urb->pipe)) {
+ /* /*
+ * The only time the QTD data toggle is used * The only time the QTD data toggle is used
+ * phase of control transfers. This phase alw * phase of control transfers. This phase alw
+ * DATA1. * DATA1.
+ */ */
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA1; _qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
+ _qtd->control_phase = DWC_OTG_CONTROL_SETUP; _qtd->control_phase = DWC_OTG_CONTROL_SETUP;
+ } }
+
+ /* start split */ /* start split */
+ _qtd->complete_split = 0; _qtd->complete_split = 0;
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
+ _qtd->isoc_split_offset = 0; _qtd->isoc_split_offset = 0;
+
+ /* Store the qtd ptr in the urb to reference what QTD /* Store the qtd ptr in the urb to reference what QTD
+ _urb->hcpriv = _qtd; _urb->hcpriv = _qtd;
+ return; return;
+} }
+
+/** /**
+ * This function adds a QTD to the QTD-list of a QH. It will * This function adds a QTD to the QTD-list of a QH. It will
+ * QH to place the QTD into. If it does not find a QH, then * QH to place the QTD into. If it does not find a QH, then
+ * new QH. If the QH to which the QTD is added is not current * new QH. If the QH to which the QTD is added is not current
+ * is placed into the proper schedule based on its EP type. * is placed into the proper schedule based on its EP type.
+ * *
+ * @param[in] _qtd The QTD to add * @param[in] _qtd The QTD to add
+ * @param[in] _dwc_otg_hcd The DWC HCD structure * @param[in] _dwc_otg_hcd The DWC HCD structure
+ * *
+ * @return 0 if successful, negative error code otherwise. * @return 0 if successful, negative error code otherwise.
+ */ */
+int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t * _qtd, dwc_otg_hcd_t int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t * _qtd, dwc_otg_hcd_t
+{ {
+ struct usb_host_endpoint *ep; struct usb_host_endpoint *ep;
+ dwc_otg_qh_t * qh; dwc_otg_qh_t * qh;
+ unsigned long flags; unsigned long flags;
+ int retval = 0; int retval = 0;
+ struct urb *urb = _qtd->urb; struct urb *urb = _qtd->urb;
+ local_irq_save(flags); local_irq_save(flags);
+
+ /* /*
+ * Get the QH which holds the QTD-list to insert to. * Get the QH which holds the QTD-list to insert to.
+ * doesn't exist. * doesn't exist.
+ */ */
+ ep = dwc_urb_to_endpoint(urb); ep = dwc_urb_to_endpoint(urb);
+ qh = (dwc_otg_qh_t *) ep->hcpriv; qh = (dwc_otg_qh_t *) ep->hcpriv;
+ if (qh == NULL) { if (qh == NULL) {
+ qh = dwc_otg_hcd_qh_create(_dwc_otg_hcd, urb) qh = dwc_otg_hcd_qh_create(_dwc_otg_hcd, urb)
+ if (qh == NULL) { if (qh == NULL) {
+ retval = -1; retval = -1;
+ goto done; goto done;
+ } }
+ ep->hcpriv = qh; ep->hcpriv = qh;
+ } }
+ _qtd->qtd_qh_ptr = qh; _qtd->qtd_qh_ptr = qh;
+ retval = dwc_otg_hcd_qh_add(_dwc_otg_hcd, qh); retval = dwc_otg_hcd_qh_add(_dwc_otg_hcd, qh);
+ if (retval == 0) { if (retval == 0) {
+ list_add_tail(&_qtd->qtd_list_entry, &qh->qtd list_add_tail(&_qtd->qtd_list_entry, &qh->qtd
+ } }
+
+done: done:
+ local_irq_restore(flags); local_irq_restore(flags);
+
+ return retval; return retval;
+} }
+
+
+#endif /* DWC_DEVICE_ONLY */ #endif /* DWC_DEVICE_ONLY */
diff --git a/fs/splice.c b/fs/splice.c
index 0d810e86f6c..5a21493e9e8 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -2599,20 +2599,22 @@ static long do_splice_2(int fd_in, struct file *in, loff_t __user *off_in,
/** handle error status */
if( ret <= 0 )
{
+#ifdef DEBUG_SPLICE
printk( KERN_ERR "%s:%s:%d\n"
"sock_splice_read read error %ld.\n",
__FILE__, __FUNCTION__, __LINE__,
ret );
-
+#endif
/** fail on specific errors */
if ( ret == 0 || ! ignore_splice_error ( ret ) )
{
+#ifdef DEBUG_SPLICE
printk( KERN_ERR "%s:%s:%d\n"
"returning read error %ld "
"after reading %d out of %d bytes.\n",
__FILE__, __FUNCTION__, __LINE__,
ret, spliced_len, len );
-
+#endif
release_splice_pipebufs_special(pipe);
goto out;
}
diff --git a/images/apollo3g.dtb b/images/apollo3g.dtb
new file mode 100755
index 00000000000..0a50563489b
--- /dev/null
+++ b/images/apollo3g.dtb
Binary files differ
diff --git a/images/uImage b/images/uImage
new file mode 100755
index 00000000000..a352a3f5ad5
--- /dev/null
+++ b/images/uImage
Binary files differ
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 72fa141d17d..b66a1d32de4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -658,10 +658,13 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
{
+
+/* SH - 04/15/11 - removed print, these were filling up logs in some environments
printk(KERN_ERR "%s:%s:%d\n"
"breaking %d\n",
__FUNCTION__, __FILE__, __LINE__,
ret);
+*/
break;
}
if (sk->sk_state == TCP_CLOSE) {