Diffstat (limited to 'drivers/usb/gadget/langwell_udc.c')
-rw-r--r--	drivers/usb/gadget/langwell_udc.c	3373
1 files changed, 3373 insertions, 0 deletions
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
new file mode 100644
index 00000000000..6829d596135
--- /dev/null
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -0,0 +1,3373 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+
+/* #undef	DEBUG */
+/* #undef	VERBOSE */
+
+#if defined(CONFIG_USB_LANGWELL_OTG)
+#define	OTG_TRANSCEIVER
+#endif
+
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include "langwell_udc.h"
+
+
+#define	DRIVER_DESC	"Intel Langwell USB Device Controller driver"
+#define	DRIVER_VERSION	"16 May 2009"
+
+static const char driver_name[] = "langwell_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+
+/* controller device global variable */
+static struct langwell_udc	*the_controller;
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor
+langwell_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
+};
+
+
+/*-------------------------------------------------------------------------*/
+/* debugging */
+
+#ifdef	DEBUG
+#define	DBG(dev, fmt, args...) \
+	pr_debug("%s %s: " fmt , driver_name, \
+			pci_name(dev->pdev), ## args)
+#else
+#define	DBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+
+#ifdef	VERBOSE
+#define	VDBG DBG
+#else
+#define	VDBG(dev, fmt, args...) \
+	do { } while (0)
+#endif	/* VERBOSE */
+
+
+#define	ERROR(dev, fmt, args...) \
+	pr_err("%s %s: " fmt , driver_name, \
+			pci_name(dev->pdev), ## args)
+
+#define	WARNING(dev, fmt, args...) \
+	pr_warning("%s %s: " fmt , driver_name, \
+			pci_name(dev->pdev), ## args)
+
+#define	INFO(dev, fmt, args...) \
+	pr_info("%s %s: " fmt , driver_name, \
+			pci_name(dev->pdev), ## args)
+
+
+#ifdef	VERBOSE
+static inline void print_all_registers(struct langwell_udc *dev)
+{
+	int	i;
+
+	/* Capability Registers */
+	printk(KERN_DEBUG "Capability Registers (offset: "
+			"0x%04x, length: 0x%08x)\n",
+			CAP_REG_OFFSET,
+			(u32)sizeof(struct langwell_cap_regs));
+	printk(KERN_DEBUG "caplength=0x%02x\n",
+			readb(&dev->cap_regs->caplength));
+	printk(KERN_DEBUG "hciversion=0x%04x\n",
+			readw(&dev->cap_regs->hciversion));
+	printk(KERN_DEBUG "hcsparams=0x%08x\n",
+			readl(&dev->cap_regs->hcsparams));
+	printk(KERN_DEBUG "hccparams=0x%08x\n",
+			readl(&dev->cap_regs->hccparams));
+	printk(KERN_DEBUG "dciversion=0x%04x\n",
+			readw(&dev->cap_regs->dciversion));
+	printk(KERN_DEBUG "dccparams=0x%08x\n",
+			readl(&dev->cap_regs->dccparams));
+
+	/* Operational Registers */
+	printk(KERN_DEBUG "Operational Registers (offset: "
+			"0x%04x, length: 0x%08x)\n",
+			OP_REG_OFFSET,
+			(u32)sizeof(struct langwell_op_regs));
+	printk(KERN_DEBUG "extsts=0x%08x\n",
+			readl(&dev->op_regs->extsts));
+	printk(KERN_DEBUG "extintr=0x%08x\n",
+			readl(&dev->op_regs->extintr));
+	printk(KERN_DEBUG "usbcmd=0x%08x\n",
+			readl(&dev->op_regs->usbcmd));
+	printk(KERN_DEBUG "usbsts=0x%08x\n",
+			readl(&dev->op_regs->usbsts));
+	printk(KERN_DEBUG "usbintr=0x%08x\n",
+			readl(&dev->op_regs->usbintr));
+	printk(KERN_DEBUG "frindex=0x%08x\n",
+			readl(&dev->op_regs->frindex));
+	printk(KERN_DEBUG "ctrldssegment=0x%08x\n",
+			readl(&dev->op_regs->ctrldssegment));
+	printk(KERN_DEBUG "deviceaddr=0x%08x\n",
+			readl(&dev->op_regs->deviceaddr));
+	printk(KERN_DEBUG "endpointlistaddr=0x%08x\n",
+			readl(&dev->op_regs->endpointlistaddr));
+	printk(KERN_DEBUG "ttctrl=0x%08x\n",
+			readl(&dev->op_regs->ttctrl));
+	printk(KERN_DEBUG "burstsize=0x%08x\n",
+			readl(&dev->op_regs->burstsize));
+	printk(KERN_DEBUG "txfilltuning=0x%08x\n",
+			readl(&dev->op_regs->txfilltuning));
+	printk(KERN_DEBUG "txttfilltuning=0x%08x\n",
+			readl(&dev->op_regs->txttfilltuning));
+	printk(KERN_DEBUG "ic_usb=0x%08x\n",
+			readl(&dev->op_regs->ic_usb));
+	printk(KERN_DEBUG "ulpi_viewport=0x%08x\n",
+			readl(&dev->op_regs->ulpi_viewport));
+	printk(KERN_DEBUG "configflag=0x%08x\n",
+			readl(&dev->op_regs->configflag));
+	printk(KERN_DEBUG "portsc1=0x%08x\n",
+			readl(&dev->op_regs->portsc1));
+	printk(KERN_DEBUG "devlc=0x%08x\n",
+			readl(&dev->op_regs->devlc));
+	printk(KERN_DEBUG "otgsc=0x%08x\n",
+			readl(&dev->op_regs->otgsc));
+	printk(KERN_DEBUG "usbmode=0x%08x\n",
+			readl(&dev->op_regs->usbmode));
+	printk(KERN_DEBUG "endptnak=0x%08x\n",
+			readl(&dev->op_regs->endptnak));
+	printk(KERN_DEBUG "endptnaken=0x%08x\n",
+			readl(&dev->op_regs->endptnaken));
+	printk(KERN_DEBUG "endptsetupstat=0x%08x\n",
+			readl(&dev->op_regs->endptsetupstat));
+	printk(KERN_DEBUG "endptprime=0x%08x\n",
+			readl(&dev->op_regs->endptprime));
+	printk(KERN_DEBUG "endptflush=0x%08x\n",
+			readl(&dev->op_regs->endptflush));
+	printk(KERN_DEBUG "endptstat=0x%08x\n",
+			readl(&dev->op_regs->endptstat));
+	printk(KERN_DEBUG "endptcomplete=0x%08x\n",
+			readl(&dev->op_regs->endptcomplete));
+
+	for (i = 0; i < dev->ep_max / 2; i++) {
+		printk(KERN_DEBUG "endptctrl[%d]=0x%08x\n",
+				i, readl(&dev->op_regs->endptctrl[i]));
+	}
+}
+#endif /* VERBOSE */
+
+
+/*-------------------------------------------------------------------------*/
+
+#define	DIR_STRING(bAddress)	(((bAddress) & USB_DIR_IN) ? "in" : "out")
+
+#define	is_in(ep)	(((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
+			USB_DIR_IN) : ((ep)->desc->bEndpointAddress \
+			& USB_DIR_IN) == USB_DIR_IN)
+
+
+#ifdef	DEBUG
+static char *type_string(u8 bmAttributes)
+{
+	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:
+		return "bulk";
+	case USB_ENDPOINT_XFER_ISOC:
+		return "iso";
+	case USB_ENDPOINT_XFER_INT:
+		return "int";
+	}
+
+	return "control";
+}
+#endif
+
+
+/* configure endpoint control registers */
+static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
+		unsigned char is_in, unsigned char ep_type)
+{
+	struct langwell_udc	*dev;
+	u32			endptctrl;
+
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in) {	/* TX */
+		if (ep_num)
+			endptctrl |= EPCTRL_TXR;
+		endptctrl |= EPCTRL_TXE;
+		endptctrl |= ep_type << EPCTRL_TXT_SHIFT;
+	} else {	/* RX */
+		if (ep_num)
+			endptctrl |= EPCTRL_RXR;
+		endptctrl |= EPCTRL_RXE;
+		endptctrl |= ep_type << EPCTRL_RXT_SHIFT;
+	}
+
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* reset ep0 dQH and endptctrl */
+static void ep0_reset(struct langwell_udc *dev)
+{
+	struct langwell_ep	*ep;
+	int			i;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* ep0 in and out */
+	for (i = 0; i < 2; i++) {
+		ep = &dev->ep[i];
+		ep->dev = dev;
+
+		/* ep0 dQH */
+		ep->dqh = &dev->ep_dqh[i];
+
+		/* configure ep0 endpoint capabilities in dQH */
+		ep->dqh->dqh_ios = 1;
+		ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
+
+		/* FIXME: enable ep0-in HW zero length termination select */
+		if (is_in(ep))
+			ep->dqh->dqh_zlt = 0;
+		ep->dqh->dqh_mult = 0;
+
+		/* configure ep0 control registers */
+		ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
+	}
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return;
+}
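
The ep_reset()/ep0_reset() helpers above only ever OR bits into the ENDPTCTRLx register. As an illustrative sketch (not driver code), this is the value ep_reset() would compose for a bulk IN endpoint whose control register happens to read back as zero, using the same EPCTRL_* masks from langwell_udc.h:

	/* illustrative sketch only, assuming ENDPTCTRL1 reads back as 0 */
	u32 endptctrl = 0;

	endptctrl |= EPCTRL_TXR;	/* ep_num != 0: reset TX data toggle */
	endptctrl |= EPCTRL_TXE;	/* enable the TX (IN) side */
	endptctrl |= USB_ENDPOINT_XFER_BULK << EPCTRL_TXT_SHIFT;
	/* the RX half of the register is left as-is for an IN endpoint */
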
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoints operations */
+
+/* configure endpoint, making it usable */
+static int langwell_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct langwell_udc	*dev;
+	struct langwell_ep	*ep;
+	u16			max = 0;
+	unsigned long		flags;
+	int			retval = 0;
+	unsigned char		zlt, ios = 0, mult = 0;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+
+	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	max = le16_to_cpu(desc->wMaxPacketSize);
+
+	/*
+	 * disable HW zero length termination select
+	 * driver handles zero length packet through req->req.zero
+	 */
+	zlt = 1;
+
+	/*
+	 * sanity check type, direction, address, and then
+	 * initialize the endpoint capabilities fields in dQH
+	 */
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		ios = 1;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		if ((dev->gadget.speed == USB_SPEED_HIGH
+				&& max != 512)
+				|| (dev->gadget.speed == USB_SPEED_FULL
+				&& max > 64)) {
+			retval = -EINVAL;
+			goto done;
+		}
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (strstr(ep->ep.name, "-iso")) {	/* bulk is ok */
+			retval = -EINVAL;
+			goto done;
+		}
+
+		switch (dev->gadget.speed) {
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+			/* fall through */
+		case USB_SPEED_FULL:
+			if (max <= 64)
+				break;
+			/* fall through */
+		default:
+			if (max <= 8)
+				break;
+			retval = -EINVAL;
+			goto done;
+		}
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (strstr(ep->ep.name, "-bulk")
+				|| strstr(ep->ep.name, "-int")) {
+			retval = -EINVAL;
+			goto done;
+		}
+
+		switch (dev->gadget.speed) {
+		case USB_SPEED_HIGH:
+			if (max <= 1024)
+				break;
+			/* fall through */
+		case USB_SPEED_FULL:
+			if (max <= 1023)
+				break;
+			/* fall through */
+		default:
+			retval = -EINVAL;
+			goto done;
+		}
+		/*
+		 * FIXME:
+		 * calculate transactions needed for high bandwidth iso
+		 */
+		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+		max = max & 0x7ff;	/* bit 0~10 */
+		/* 3 transactions at most */
+		if (mult > 3) {
+			retval = -EINVAL;
+			goto done;
+		}
+		break;
+	default:
+		retval = -EINVAL;
+		goto done;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* configure endpoint capabilities in dQH */
+	ep->dqh->dqh_ios = ios;
+	ep->dqh->dqh_mpl = cpu_to_le16(max);
+	ep->dqh->dqh_zlt = zlt;
+	ep->dqh->dqh_mult = mult;
+
+	ep->ep.maxpacket = max;
+	ep->desc = desc;
+	ep->stopped = 0;
+	ep->ep_num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+
+	/* ep_type */
+	ep->ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	/* configure endpoint control registers */
+	ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
+
+	DBG(dev, "enabled %s (ep%d%s-%s), max %04x\n",
+			_ep->name,
+			ep->ep_num,
+			DIR_STRING(desc->bEndpointAddress),
+			type_string(desc->bmAttributes),
+			max);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+done:
+	VDBG(dev, "<--- %s()\n", __func__);
+	return retval;
+}
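
The mult/max lines in the ISO case implement the USB 2.0 high-bandwidth encoding of wMaxPacketSize: bits 10:0 carry the per-transaction payload and bits 12:11 the number of additional transactions per microframe. A worked decode (illustrative only; as the FIXME notes, the speed check above still rejects raw values larger than 1024, so high-bandwidth descriptors do not actually get this far yet):

	/* illustrative decode, not driver code: wMaxPacketSize = 0x1400 */
	u16 wmax = 0x1400;		/* bits 12:11 = 2 extra transactions */
	unsigned char mult = 1 + ((wmax >> 11) & 0x03);	/* = 3 transactions */
	u16 payload = wmax & 0x7ff;	/* = 1024 bytes per transaction */
	/* up to 3 * 1024 = 3072 bytes per microframe */
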
+
+
+/*-------------------------------------------------------------------------*/
+
+/* retire a request */
+static void done(struct langwell_ep *ep, struct langwell_request *req,
+		int status)
+{
+	struct langwell_udc	*dev = ep->dev;
+	unsigned		stopped = ep->stopped;
+	struct langwell_dtd	*curr_dtd, *next_dtd;
+	int			i;
+
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* remove the req from ep->queue */
+	list_del_init(&req->queue);
+
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	/* free dTD for the request */
+	next_dtd = req->head;
+	for (i = 0; i < req->dtd_count; i++) {
+		curr_dtd = next_dtd;
+		if (i != req->dtd_count - 1)
+			next_dtd = curr_dtd->next_dtd_virt;
+		dma_pool_free(dev->dtd_pool, curr_dtd, curr_dtd->dtd_dma);
+	}
+
+	if (req->mapped) {
+		dma_unmap_single(&dev->pdev->dev, req->req.dma,
+			req->req.length,
+			is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	} else
+		dma_sync_single_for_cpu(&dev->pdev->dev, req->req.dma,
+			req->req.length,
+			is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	if (status != -ESHUTDOWN)
+		DBG(dev, "complete %s, req %p, stat %d, len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	/* don't modify queue heads during completion callback */
+	ep->stopped = 1;
+
+	spin_unlock(&dev->lock);
+	/* complete routine from gadget driver */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&dev->lock);
+	ep->stopped = stopped;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+static void langwell_ep_fifo_flush(struct usb_ep *_ep);
+
+/* delete all endpoint requests, called with spinlock held */
+static void nuke(struct langwell_ep *ep, int status)
+{
+	/* called with spinlock held */
+	ep->stopped = 1;
+
+	/* endpoint fifo flush */
+	if (ep->desc)
+		langwell_ep_fifo_flush(&ep->ep);
+
+	while (!list_empty(&ep->queue)) {
+		struct langwell_request	*req = NULL;
+		req = list_entry(ep->queue.next, struct langwell_request,
+				queue);
+		done(ep, req, status);
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoint is no longer usable */
+static int langwell_ep_disable(struct usb_ep *_ep)
+{
+	struct langwell_ep	*ep;
+	unsigned long		flags;
+	struct langwell_udc	*dev;
+	int			ep_num;
+	u32			endptctrl;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* disable endpoint control register */
+	ep_num = ep->ep_num;
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in(ep))
+		endptctrl &= ~EPCTRL_TXE;
+	else
+		endptctrl &= ~EPCTRL_RXE;
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	/* nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+
+	ep->desc = NULL;
+	ep->stopped = 1;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	DBG(dev, "disabled %s\n", _ep->name);
+	VDBG(dev, "<--- %s()\n", __func__);
+
+	return 0;
+}
+
+
+/* allocate a request object to use with this endpoint */
+static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
+		gfp_t gfp_flags)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	struct langwell_request	*req = NULL;
+
+	if (!_ep)
+		return NULL;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+
+	VDBG(dev, "alloc request for %s\n", _ep->name);
+	VDBG(dev, "<--- %s()\n", __func__);
+	return &req->req;
+}
+
+
+/* free a request object */
+static void langwell_free_request(struct usb_ep *_ep,
+		struct usb_request *_req)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	struct langwell_request	*req = NULL;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !_req)
+		return;
+
+	req = container_of(_req, struct langwell_request, req);
+	WARN_ON(!list_empty(&req->queue));
+
+	kfree(req);
+
+	VDBG(dev, "free request for %s\n", _ep->name);
+	VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* queue dTD and PRIME endpoint */
+static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
+{
+	u32			bit_mask, usbcmd, endptstat, dtd_dma;
+	u8			dtd_status;
+	int			i;
+	struct langwell_dqh	*dqh;
+	struct langwell_udc	*dev;
+
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	i = ep->ep_num * 2 + is_in(ep);
+	dqh = &dev->ep_dqh[i];
+
+	if (ep->ep_num)
+		VDBG(dev, "%s\n", ep->name);
+	else
+		/* ep0 */
+		VDBG(dev, "%s-%s\n", ep->name, is_in(ep) ? "in" : "out");
+
+	VDBG(dev, "ep_dqh[%d] addr: 0x%08x\n", i, (u32)&(dev->ep_dqh[i]));
+
+	bit_mask = is_in(ep) ?
+		(1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
+
+	VDBG(dev, "bit_mask = 0x%08x\n", bit_mask);
+
+	/* check if the pipe is empty */
+	if (!(list_empty(&ep->queue))) {
+		/* add dTD to the end of linked list */
+		struct langwell_request	*lastreq;
+		lastreq = list_entry(ep->queue.prev,
+				struct langwell_request, queue);
+
+		lastreq->tail->dtd_next =
+			cpu_to_le32(req->head->dtd_dma & DTD_NEXT_MASK);
+
+		/* read prime bit, if 1 goto out */
+		if (readl(&dev->op_regs->endptprime) & bit_mask)
+			goto out;
+
+		do {
+			/* set ATDTW bit in USBCMD */
+			usbcmd = readl(&dev->op_regs->usbcmd);
+			writel(usbcmd | CMD_ATDTW, &dev->op_regs->usbcmd);
+
+			/* read correct status bit */
+			endptstat = readl(&dev->op_regs->endptstat) & bit_mask;
+
+		} while (!(readl(&dev->op_regs->usbcmd) & CMD_ATDTW));
+
+		/* write ATDTW bit to 0 */
+		usbcmd = readl(&dev->op_regs->usbcmd);
+		writel(usbcmd & ~CMD_ATDTW, &dev->op_regs->usbcmd);
+
+		if (endptstat)
+			goto out;
+	}
+
+	/* write dQH next pointer and terminate bit to 0 */
+	dtd_dma = req->head->dtd_dma & DTD_NEXT_MASK;
+	dqh->dtd_next = cpu_to_le32(dtd_dma);
+
+	/* clear active and halt bit */
+	dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
+	dqh->dtd_status &= dtd_status;
+	VDBG(dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
+
+	/* write 1 to endptprime register to PRIME endpoint */
+	bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
+	VDBG(dev, "endprime bit_mask = 0x%08x\n", bit_mask);
+	writel(bit_mask, &dev->op_regs->endptprime);
+out:
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
+
+
+/* fill in the dTD structure to build a transfer descriptor */
+static struct langwell_dtd *build_dtd(struct langwell_request *req,
+		unsigned *length, dma_addr_t *dma, int *is_last)
+{
+	u32			buf_ptr;
+	struct langwell_dtd	*dtd;
+	struct langwell_udc	*dev;
+	int			i;
+
+	dev = req->ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* the maximum transfer length, up to 16k bytes */
+	*length = min(req->req.length - req->req.actual,
+			(unsigned)DTD_MAX_TRANSFER_LENGTH);
+
+	/* create dTD dma_pool resource */
+	dtd = dma_pool_alloc(dev->dtd_pool, GFP_KERNEL, dma);
+	if (dtd == NULL)
+		return dtd;
+	dtd->dtd_dma = *dma;
+
+	/* initialize buffer page pointers */
+	buf_ptr = (u32)(req->req.dma + req->req.actual);
+	for (i = 0; i < 5; i++)
+		dtd->dtd_buf[i] = cpu_to_le32(buf_ptr + i * PAGE_SIZE);
+
+	req->req.actual += *length;
+
+	/* fill in total bytes with transfer size */
+	dtd->dtd_total = cpu_to_le16(*length);
+	VDBG(dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
+
+	/* decide whether this is the last dTD, honoring req->req.zero */
+	if (req->req.zero) {
+		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+			*is_last = 1;
+		else
+			*is_last = 0;
+	} else if (req->req.length == req->req.actual) {
+		*is_last = 1;
+	} else
+		*is_last = 0;
+
+	if (*is_last == 0)
+		VDBG(dev, "multi-dtd request!\n");
+
+	/* set interrupt on complete bit for the last dTD */
+	if (*is_last && !req->req.no_interrupt)
+		dtd->dtd_ioc = 1;
+
+	/* set multiplier override 0 for non-ISO and non-TX endpoint */
+	dtd->dtd_multo = 0;
+
+	/* set the active bit of status field to 1 */
+	dtd->dtd_status = DTD_STS_ACTIVE;
+	VDBG(dev, "dtd->dtd_status = 0x%02x\n", dtd->dtd_status);
+
+	VDBG(dev, "length = %d, dma addr= 0x%08x\n", *length, (int)*dma);
+	VDBG(dev, "<--- %s()\n", __func__);
+	return dtd;
+}
+
+
+/* generate dTD linked list for a request */
+static int req_to_dtd(struct langwell_request *req)
+{
+	unsigned		count;
+	int			is_last, is_first = 1;
+	struct langwell_dtd	*dtd, *last_dtd = NULL;
+	struct langwell_udc	*dev;
+	dma_addr_t		dma;
+
+	dev = req->ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+	do {
+		dtd = build_dtd(req, &count, &dma, &is_last);
+		if (dtd == NULL)
+			return -ENOMEM;
+
+		if (is_first) {
+			is_first = 0;
+			req->head = dtd;
+		} else {
+			last_dtd->dtd_next = cpu_to_le32(dma);
+			last_dtd->next_dtd_virt = dtd;
+		}
+		last_dtd = dtd;
+		req->dtd_count++;
+	} while (!is_last);
+
+	/* set terminate bit to 1 for the last dTD */
+	dtd->dtd_next = DTD_TERM;
+
+	req->tail = dtd;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
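
req_to_dtd() above chains one dTD per DTD_MAX_TRANSFER_LENGTH slice of the request, and build_dtd() gives each dTD five page pointers so a slice may start at any offset within a page. A sketch of the resulting split for a hypothetical 40960-byte request, assuming DTD_MAX_TRANSFER_LENGTH is 16384 per the "16k bytes" comment above; if req->req.zero were set and the length were a multiple of maxpacket, one extra zero-length dTD would be appended:

	/* illustrative arithmetic only, mirroring the loop in req_to_dtd() */
	unsigned remaining = 40960, dtd_count = 0;
	while (remaining) {
		unsigned chunk = min(remaining, 16384u);
		remaining -= chunk;	/* slices: 16384, 16384, 8192 */
		dtd_count++;
	}
	/* dtd_count == 3; only the last dTD gets dtd_ioc set */
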
+
+/*-------------------------------------------------------------------------*/
+
+/* queue (submit) an I/O request to an endpoint */
+static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+		gfp_t gfp_flags)
+{
+	struct langwell_request	*req;
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	unsigned long		flags;
+	int			is_iso = 0, zlflag = 0;
+
+	/* always require a cpu-view buffer */
+	req = container_of(_req, struct langwell_request, req);
+	ep = container_of(_ep, struct langwell_ep, ep);
+
+	if (!_req || !_req->complete || !_req->buf
+			|| !list_empty(&req->queue))
+		return -EINVAL;
+
+	if (unlikely(!_ep || !ep->desc))
+		return -EINVAL;
+
+	dev = ep->dev;
+	req->ep = ep;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+		is_iso = 1;
+	}
+
+	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+
+	/* set up dma mapping in case the caller didn't */
+	if (_req->dma == DMA_ADDR_INVALID) {
+		/* WORKAROUND: WARN_ON(size == 0) */
+		if (_req->length == 0) {
+			VDBG(dev, "req->length: 0->1\n");
+			zlflag = 1;
+			_req->length++;
+		}
+
+		_req->dma = dma_map_single(&dev->pdev->dev,
+				_req->buf, _req->length,
+				is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (zlflag && (_req->length == 1)) {
+			VDBG(dev, "req->length: 1->0\n");
+			zlflag = 0;
+			_req->length = 0;
+		}
+
+		req->mapped = 1;
+		VDBG(dev, "req->mapped = 1\n");
+	} else {
+		dma_sync_single_for_device(&dev->pdev->dev,
+				_req->dma, _req->length,
+				is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		req->mapped = 0;
+		VDBG(dev, "req->mapped = 0\n");
+	}
+
+	DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
+			_ep->name,
+			_req, _req->length, _req->buf, _req->dma);
+
+	_req->status = -EINPROGRESS;
+	_req->actual = 0;
+	req->dtd_count = 0;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* build and put dTDs to endpoint queue */
+	if (!req_to_dtd(req)) {
+		queue_dtd(ep, req);
+	} else {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOMEM;
+	}
+
+	/* update ep0 state */
+	if (ep->ep_num == 0)
+		dev->ep0_state = DATA_STATE_XMIT;
+
+	if (likely(req != NULL)) {
+		list_add_tail(&req->queue, &ep->queue);
+		VDBG(dev, "list_add_tail()\n");
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return 0;
+}
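
langwell_ep_queue() is reached through the standard gadget API wrappers. A minimal sketch of the function-driver side (hypothetical names my_submit/my_ep/my_buf/my_complete; error handling trimmed). Because langwell_alloc_request() initializes req->dma to DMA_ADDR_INVALID, the queue path above will dma_map_single() the buffer itself:

	/* hypothetical gadget-function snippet, not part of this file */
	static int my_submit(struct usb_ep *my_ep, void *my_buf, unsigned len,
			void (*my_complete)(struct usb_ep *,
					struct usb_request *))
	{
		struct usb_request *req;

		req = usb_ep_alloc_request(my_ep, GFP_ATOMIC);
		if (!req)
			return -ENOMEM;

		req->buf = my_buf;
		req->length = len;
		req->complete = my_complete;	/* runs from done(), lock dropped */

		return usb_ep_queue(my_ep, req, GFP_ATOMIC);
	}
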
+
+
+/* dequeue (cancel, unlink) an I/O request from an endpoint */
+static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct langwell_ep	*ep;
+	struct langwell_udc	*dev;
+	struct langwell_request	*req;
+	unsigned long		flags;
+	int			stopped, ep_num, retval = 0;
+	u32			endptctrl;
+
+	ep = container_of(_ep, struct langwell_ep, ep);
+	dev = ep->dev;
+	VDBG(dev, "---> %s()\n", __func__);
+
+	if (!_ep || !ep->desc || !_req)
+		return -EINVAL;
+
+	if (!dev->driver)
+		return -ESHUTDOWN;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	stopped = ep->stopped;
+
+	/* quiesce dma while we patch the queue */
+	ep->stopped = 1;
+	ep_num = ep->ep_num;
+
+	/* disable endpoint control register */
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in(ep))
+		endptctrl &= ~EPCTRL_TXE;
+	else
+		endptctrl &= ~EPCTRL_RXE;
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	/* make sure it's still queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+
+	if (&req->req != _req) {
+		retval = -EINVAL;
+		goto done;
+	}
+
+	/* queue head may be partially complete. */
+	if (ep->queue.next == &req->queue) {
+		DBG(dev, "unlink (%s) dma\n", _ep->name);
+		_req->status = -ECONNRESET;
+		langwell_ep_fifo_flush(&ep->ep);
+
+		/* not the last request in endpoint queue */
+		if (likely(req->queue.next != &ep->queue)) {
+			struct langwell_dqh	*dqh;
+			struct langwell_request	*next_req;
+
+			dqh = ep->dqh;
+			next_req = list_entry(req->queue.next,
+					struct langwell_request, queue);
+
+			/* point the dQH to the first dTD of next request */
+			writel((u32) next_req->head, &dqh->dqh_current);
+		}
+	} else {
+		struct langwell_request	*prev_req;
+
+		prev_req = list_entry(req->queue.prev,
+				struct langwell_request, queue);
+		writel(readl(&req->tail->dtd_next),
+				&prev_req->tail->dtd_next);
+	}
+
+	done(ep, req, -ECONNRESET);
+
+done:
+	/* enable endpoint again */
+	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+	if (is_in(ep))
+		endptctrl |= EPCTRL_TXE;
+	else
+		endptctrl |= EPCTRL_RXE;
+	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+	ep->stopped = stopped;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return retval;
+}
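
A gadget driver cancels a queued request through usb_ep_dequeue(), which lands in the function above; the request then completes with -ECONNRESET. A sketch, reusing the hypothetical names from the previous example:

	/* hypothetical: abort a transfer that timed out */
	if (usb_ep_dequeue(my_ep, req) != 0)
		pr_debug("request already completed or not queued\n");
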
"set" : "clear"); + VDBG(dev, "<--- %s()\n", __func__); + return retval; +} + + +/* set the halt feature and ignores clear requests */ +static int langwell_ep_set_wedge(struct usb_ep *_ep) +{ + struct langwell_ep *ep; + struct langwell_udc *dev; + + ep = container_of(_ep, struct langwell_ep, ep); + dev = ep->dev; + + VDBG(dev, "---> %s()\n", __func__); + + if (!_ep || !ep->desc) + return -EINVAL; + + VDBG(dev, "<--- %s()\n", __func__); + return usb_ep_set_halt(_ep); +} + + +/* flush contents of a fifo */ +static void langwell_ep_fifo_flush(struct usb_ep *_ep) +{ + struct langwell_ep *ep; + struct langwell_udc *dev; + u32 flush_bit; + unsigned long timeout; + + ep = container_of(_ep, struct langwell_ep, ep); + dev = ep->dev; + + VDBG(dev, "---> %s()\n", __func__); + + if (!_ep || !ep->desc) { + VDBG(dev, "ep or ep->desc is NULL\n"); + VDBG(dev, "<--- %s()\n", __func__); + return; + } + + VDBG(dev, "%s-%s fifo flush\n", _ep->name, is_in(ep) ? "in" : "out"); + + /* flush endpoint buffer */ + if (ep->ep_num == 0) + flush_bit = (1 << 16) | 1; + else if (is_in(ep)) + flush_bit = 1 << (ep->ep_num + 16); /* TX */ + else + flush_bit = 1 << ep->ep_num; /* RX */ + + /* wait until flush complete */ + timeout = jiffies + FLUSH_TIMEOUT; + do { + writel(flush_bit, &dev->op_regs->endptflush); + while (readl(&dev->op_regs->endptflush)) { + if (time_after(jiffies, timeout)) { + ERROR(dev, "ep flush timeout\n"); + goto done; + } + cpu_relax(); + } + } while (readl(&dev->op_regs->endptstat) & flush_bit); +done: + VDBG(dev, "<--- %s()\n", __func__); +} + + +/* endpoints operations structure */ +static const struct usb_ep_ops langwell_ep_ops = { + + /* configure endpoint, making it usable */ + .enable = langwell_ep_enable, + + /* endpoint is no longer usable */ + .disable = langwell_ep_disable, + + /* allocate a request object to use with this endpoint */ + .alloc_request = langwell_alloc_request, + + /* free a request object */ + .free_request = langwell_free_request, + + /* queue (submits) an I/O requests to an endpoint */ + .queue = langwell_ep_queue, + + /* dequeue (cancels, unlinks) an I/O request from an endpoint */ + .dequeue = langwell_ep_dequeue, + + /* set the endpoint halt feature */ + .set_halt = langwell_ep_set_halt, + + /* set the halt feature and ignores clear requests */ + .set_wedge = langwell_ep_set_wedge, + + /* flush contents of a fifo */ + .fifo_flush = langwell_ep_fifo_flush, +}; + + +/*-------------------------------------------------------------------------*/ + +/* device controller usb_gadget_ops structure */ + +/* returns the current frame number */ +static int langwell_get_frame(struct usb_gadget *_gadget) +{ + struct langwell_udc *dev; + u16 retval; + + if (!_gadget) + return -ENODEV; + + dev = container_of(_gadget, struct langwell_udc, gadget); + VDBG(dev, "---> %s()\n", __func__); + + retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK; + + VDBG(dev, "<--- %s()\n", __func__); + return retval; +} + + +/* tries to wake up the host connected to this gadget */ +static int langwell_wakeup(struct usb_gadget *_gadget) +{ + struct langwell_udc *dev; + u32 portsc1, devlc; + unsigned long flags; + + if (!_gadget) + return 0; + + dev = container_of(_gadget, struct langwell_udc, gadget); + VDBG(dev, "---> %s()\n", __func__); + + /* Remote Wakeup feature not enabled by host */ + if (!dev->remote_wakeup) + return -ENOTSUPP; + + spin_lock_irqsave(&dev->lock, flags); + + portsc1 = readl(&dev->op_regs->portsc1); + if (!(portsc1 & PORTS_SUSP)) { + spin_unlock_irqrestore(&dev->lock, flags); + 
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device controller usb_gadget_ops structure */
+
+/* returns the current frame number */
+static int langwell_get_frame(struct usb_gadget *_gadget)
+{
+	struct langwell_udc	*dev;
+	u16			retval;
+
+	if (!_gadget)
+		return -ENODEV;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	VDBG(dev, "---> %s()\n", __func__);
+
+	retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
+
+	VDBG(dev, "<--- %s()\n", __func__);
+	return retval;
+}
+
+
+/* tries to wake up the host connected to this gadget */
+static int langwell_wakeup(struct usb_gadget *_gadget)
+{
+	struct langwell_udc	*dev;
+	u32			portsc1, devlc;
+	unsigned long		flags;
+
+	if (!_gadget)
+		return 0;
+
+	dev = container_of(_gadget, struct langwell_udc, gadget);
+	VDBG(dev, "---> %s()\n", __func__);
+
+	/* Remote Wakeup feature not enabled by host */
+	if (!dev->remote_wakeup)
+		return -ENOTSUPP;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	portsc1 = readl(&dev->op_regs->portsc1);
+	if (!(portsc1 & PORTS_SUSP)) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return 0;
+	}
+
+	/* LPM L1 to L0, remote wakeup */
+	if (dev->lpm && dev->lpm_state == LPM_L1) {