Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r--  drivers/usb/musb/musb_host.c  2170
1 file changed, 2170 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644
index 00000000000..8b4be012669
--- /dev/null
+++ b/drivers/usb/musb/musb_host.c
@@ -0,0 +1,2170 @@
+/*
+ * MUSB OTG driver host support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include "musb_core.h"
+#include "musb_host.h"
+
+
+/* MUSB HOST status 22-mar-2006
+ *
+ * - There's still lots of partial code duplication for fault paths, so
+ * they aren't handled as consistently as they need to be.
+ *
+ * - PIO mostly behaved when last tested.
+ * + including ep0, with all usbtest cases 9, 10
+ * + usbtest 14 (ep0out) doesn't seem to run at all
+ * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
+ * configurations, but otherwise double buffering passes basic tests.
+ * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
+ *
+ * - DMA (CPPI) ... partially behaves, not currently recommended
+ * + about 1/15 the speed of typical EHCI implementations (PCI)
+ * + RX, all too often reqpkt seems to misbehave after tx
+ * + TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - Still no traffic scheduling code to keep NAKing bulk or control
+ * transfers from starving other requests, or to make efficient use
+ * of hardware for periodic transfers. (Note that network drivers
+ * commonly post bulk reads that stay pending for a long time; these
+ * would make very visible trouble.)
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August-2006:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ * extra endpoint for periodic use enabling hub + keybd + mouse. That
+ * mostly works, except that with "usbnet" it's easy to trigger cases
+ * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
+ * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ * although ARP RX wins. (That test was done with a full speed link.)
+ */
+
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ *
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it ... one remote device may easily be NAKing while others
+ * need to perform transfers in that same direction. The same thing could
+ * be done in software though, assuming dma cooperates.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic: the endpoint will be
+ * "claimed" until its software queue is no longer refilled. No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
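+
+/* Example (per the 24-August-2006 note above): bulk traffic claims both
+ * sides of hw ep1, while each periodic stream (hub + keyboard + mouse)
+ * claims one of the remaining endpoints until its queue empties.
+ */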
+
+
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, unsigned int is_out,
+ u8 *buf, u32 len);
+
+/*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+{
+ void __iomem *epio = ep->regs;
+ u16 csr;
+ int retries = 1000;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (retries-- < 1) {
+ ERR("Could not flush host TX fifo: csr: %04x\n", csr);
+ return;
+ }
+ mdelay(1);
+ }
+}
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * musb must be locked.
+ */
+static inline void musb_h_tx_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ if (ep->epnum) {
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+ } else {
+ txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
+ musb_writew(ep->regs, MUSB_CSR0, txcsr);
+ }
+}
+
+static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+}
+
+/*
+ * Start the URB at the front of an endpoint's queue; the endpoint
+ * must already be claimed by the caller.
+ *
+ * Context: controller locked, irqs blocked
+ */
+static void
+musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+{
+ u16 frame;
+ u32 len;
+ void *buf;
+ void __iomem *mbase = musb->mregs;
+ struct urb *urb = next_urb(qh);
+ struct musb_hw_ep *hw_ep = qh->hw_ep;
+ unsigned pipe = urb->pipe;
+ u8 address = usb_pipedevice(pipe);
+ int epnum = hw_ep->epnum;
+
+ /* initialize software qh state */
+ qh->offset = 0;
+ qh->segsize = 0;
+
+ /* gather right source of data */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* control transfers always start with SETUP */
+ is_in = 0;
+ hw_ep->out_qh = qh;
+ musb->ep0_stage = MUSB_EP0_START;
+ buf = urb->setup_packet;
+ len = 8;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ qh->iso_idx = 0;
+ qh->frame = 0;
+ buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
+ len = urb->iso_frame_desc[0].length;
+ break;
+ default: /* bulk, interrupt */
+ buf = urb->transfer_buffer;
+ len = urb->transfer_buffer_length;
+ }
+
+ DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+ qh, urb, address, qh->epnum,
+ is_in ? "in" : "out",
+ ({char *s; switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
+ case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
+ case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
+ default: s = "-intr"; break;
+ }; s; }),
+ epnum, buf, len);
+
+ /* Configure endpoint */
+ if (is_in || hw_ep->is_shared_fifo)
+ hw_ep->in_qh = qh;
+ else
+ hw_ep->out_qh = qh;
+ musb_ep_program(musb, epnum, urb, !is_in, buf, len);
+
+ /* transmit may have more work: start it when it is time */
+ if (is_in)
+ return;
+
+ /* determine if the time is right for a periodic transfer */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ DBG(3, "check whether there's still time for periodic Tx\n");
+ qh->iso_idx = 0;
+ frame = musb_readw(mbase, MUSB_FRAME);
+ /* FIXME this doesn't implement that scheduling policy ...
+ * or handle framecounter wrapping
+ */
+ if ((urb->transfer_flags & URB_ISO_ASAP)
+ || (frame >= urb->start_frame)) {
+ /* REVISIT the SOF irq handler shouldn't duplicate
+ * this code; and we don't init urb->start_frame...
+ */
+ qh->frame = 0;
+ goto start;
+ } else {
+ qh->frame = urb->start_frame;
+ /* enable SOF interrupt so we can count down */
+ DBG(1, "SOF for %d\n", epnum);
+#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
+#endif
+ }
+ break;
+ default:
+start:
+ DBG(4, "Start TX%d %s\n", epnum,
+ hw_ep->tx_channel ? "dma" : "pio");
+
+ if (!hw_ep->tx_channel)
+ musb_h_tx_start(hw_ep);
+ else if (is_cppi_enabled() || tusb_dma_omap())
+ cppi_host_txdma_start(hw_ep);
+ }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static void
+__musb_giveback(struct musb *musb, struct urb *urb, int status)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
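+ /* pick a DBG verbosity level from urb->status: success and the
+ * common/boring faults log quietly, unexpected faults loudly
+ */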
+ DBG(({ int level; switch (urb->status) {
+ case 0:
+ level = 4;
+ break;
+ /* common/boring faults */
+ case -EREMOTEIO:
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ case -EPIPE:
+ level = 3;
+ break;
+ default:
+ level = 2;
+ break;
+ }; level; }),
+ "complete %p (%d), dev%d ep%d%s, %d/%d\n",
+ urb, urb->status,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->actual_length, urb->transfer_buffer_length
+ );
+
+ spin_unlock(&musb->lock);
+ usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+ spin_lock(&musb->lock);
+}
+
+/* for bulk/interrupt endpoints only */
+static inline void
+musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+{
+ struct usb_device *udev = urb->dev;
+ u16 csr;
+ void __iomem *epio = ep->regs;
+ struct musb_qh *qh;
+
+ /* FIXME: the current Mentor DMA code seems to have
+ * problems getting toggle correct.
+ */
+
+ if (is_in || ep->is_shared_fifo)
+ qh = ep->in_qh;
+ else
+ qh = ep->out_qh;
+
+ if (!is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ usb_settoggle(udev, qh->epnum, 1,
+ (csr & MUSB_TXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ usb_settoggle(udev, qh->epnum, 0,
+ (csr & MUSB_RXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static struct musb_qh *
+musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+{
+ int is_in;
+ struct musb_hw_ep *ep = qh->hw_ep;
+ struct musb *musb = ep->musb;
+ int ready = qh->is_ready;
+
+ if (ep->is_shared_fifo)
+ is_in = 1;
+ else
+ is_in = usb_pipein(urb->pipe);
+
+ /* save toggle eagerly, for paranoia */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ musb_save_toggle(ep, is_in, urb);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (status == 0 && urb->error_count)
+ status = -EXDEV;
+ break;
+ }
+
+ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+
+ qh->is_ready = 0;
+ __musb_giveback(musb, urb, status);
+ qh->is_ready = ready;
+
+ /* reclaim resources (and bandwidth) ASAP; deschedule it, and
+ * invalidate qh as soon as list_empty(&hep->urb_list)
+ */
+ if (list_empty(&qh->hep->urb_list)) {
+ struct list_head *head;
+
+ if (is_in)
+ ep->rx_reinit = 1;
+ else
+ ep->tx_reinit = 1;
+
+ /* clobber old pointers to this qh */
+ if (is_in || ep->is_shared_fifo)
+ ep->in_qh = NULL;
+ else
+ ep->out_qh = NULL;
+ qh->hep->hcpriv = NULL;
+
+ switch (qh->type) {
+
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ /* this is where periodic bandwidth should be
+ * de-allocated if it's tracked and allocated;
+ * and where we'd update the schedule tree...
+ */
+ musb->periodic[ep->epnum] = NULL;
+ kfree(qh);
+ qh = NULL;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ /* fifo policy for these lists, except that NAKing
+ * should rotate a qh to the end (for fairness).
+ */
+ head = qh->ring.prev;
+ list_del(&qh->ring);
+ kfree(qh);
+ qh = first_qh(head);
+ break;
+ }
+ }
+ return qh;
+}
+
+/*
+ * Advance this hardware endpoint's queue, completing the specified urb and
+ * advancing to either the next urb queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, irqs are blocked
+ */
+static void
+musb_advance_schedule(struct musb *musb, struct urb *urb,
+ struct musb_hw_ep *hw_ep, int is_in)
+{
+ struct musb_qh *qh;
+
+ if (is_in || hw_ep->is_shared_fifo)
+ qh = hw_ep->in_qh;
+ else
+ qh = hw_ep->out_qh;
+
+ if (urb->status == -EINPROGRESS)
+ qh = musb_giveback(qh, urb, 0);
+ else
+ qh = musb_giveback(qh, urb, urb->status);
+
+ if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+ DBG(4, "... next ep%d %cX urb %p\n",
+ hw_ep->epnum, is_in ? 'R' : 'T',
+ next_urb(qh));
+ musb_start_urb(musb, is_in, qh);
+ }
+}
+
+static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+{
+ /* we don't want fifo to fill itself again;
+ * ignore dma (various models),
+ * leave toggle alone (may not have been saved yet)
+ */
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
+ csr &= ~(MUSB_RXCSR_H_REQPKT
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR);
+
+ /* write 2x to allow double buffering */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+
+ /* flush writebuffer */
+ return musb_readw(hw_ep->regs, MUSB_RXCSR);
+}
+
+/*
+ * PIO RX for a packet (or part of it).
+ */
+static bool
+musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+{
+ u16 rx_count;
+ u8 *buf;
+ u16 csr;
+ bool done = false;
+ u32 length;
+ int do_flush = 0;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ int pipe = urb->pipe;
+ void *buffer = urb->transfer_buffer;
+
+ /* musb_ep_select(mbase, epnum); */
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+ DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+ urb->transfer_buffer, qh->offset,
+ urb->transfer_buffer_length);
+
+ /* unload FIFO */
+ if (usb_pipeisoc(pipe)) {
+ int status = 0;
+ struct usb_iso_packet_descriptor *d;
+
+ if (iso_err) {
+ status = -EILSEQ;
+ urb->error_count++;
+ }
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ buf = buffer + d->offset;
+ length = d->length;
+ if (rx_count > length) {
+ if (status == 0) {
+ status = -EOVERFLOW;
+ urb->error_count++;
+ }
+ DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ d->actual_length = length;
+
+ d->status = status;
+
+ /* see if we are done */
+ done = (++qh->iso_idx >= urb->number_of_packets);
+ } else {
+ /* non-isoch */
+ buf = buffer + qh->offset;
+ length = urb->transfer_buffer_length - qh->offset;
+ if (rx_count > length) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = -EOVERFLOW;
+ DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ qh->offset += length;
+
+ /* see if we are done */
+ done = (urb->actual_length == urb->transfer_buffer_length)
+ || (rx_count < qh->maxpacket)
+ || (urb->status != -EINPROGRESS);
+ if (done
+ && (urb->status == -EINPROGRESS)
+ && (urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (urb->actual_length
+ < urb->transfer_buffer_length))
+ urb->status = -EREMOTEIO;
+ }
+
+ musb_read_fifo(hw_ep, length, buf);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+ if (unlikely(do_flush))
+ musb_h_flush_rxfifo(hw_ep, csr);
+ else {
+ /* REVISIT this assumes AUTOCLEAR is never set */
+ csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
+ if (!done)
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ return done;
+}
+
+/* we don't always need to reinit a given side of an endpoint...
+ * when we do, use tx/rx reinit routine and then construct a new CSR
+ * to address data toggle, NYET, and DMA or PIO.
+ *
+ * it's possible that driver bugs (especially for DMA) or aborting a
+ * transfer might have left the endpoint busier than it should be.
+ * the busy/not-empty tests are basically paranoia.
+ */
+static void
+musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+{
+ u16 csr;
+
+ /* NOTE: we know the "rx" fifo reinit never triggers for ep0.
+ * That always uses tx_reinit since ep0 repurposes TX register
+ * offsets; the initial SETUP packet is also a kind of OUT.
+ */
+
+ /* if programmed for Tx, put it in RX mode */
+ if (ep->is_shared_fifo) {
+ csr = musb_readw(ep->regs, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_MODE) {
+ musb_h_tx_flush_fifo(ep);
+ musb_writew(ep->regs, MUSB_TXCSR,
+ MUSB_TXCSR_FRCDATATOG);
+ }
+ /* clear mode (and everything else) to enable Rx */
+ musb_writew(ep->regs, MUSB_TXCSR, 0);
+
+ /* scrub all previous state, clearing toggle */
+ } else {
+ csr = musb_readw(ep->regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY)
+ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+ musb_readw(ep->regs, MUSB_RXCOUNT));
+
+ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
+ qh->addr_reg);
+ musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
+ qh->h_addr_reg);
+ musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
+ qh->h_port_reg);
+ } else
+ musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint, interval/NAKlimit, i/o size */
+ musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
+ musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
+ /* NOTE: bulk combining rewrites high bits of maxpacket */
+ musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+
+ ep->rx_reinit = 0;
+}
+
+
+/*
+ * Program an HDRC endpoint as per the given URB
+ * Context: irqs blocked, controller lock held
+ */
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, unsigned int is_out,
+ u8 *buf, u32 len)
+{
+ struct dma_controller *dma_controller;
+ struct dma_channel *dma_channel;
+ u8 dma_ok;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh;
+ u16 packet_sz;
+
+ if (!is_out || hw_ep->is_shared_fifo)
+ qh = hw_ep->in_qh;
+ else
+ qh = hw_ep->out_qh;
+
+ packet_sz = qh->maxpacket;
+
+ DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ "h_addr%02x h_port%02x bytes %d\n",
+ is_out ? "-->" : "<--",
+ epnum, urb, urb->dev->speed,
+ qh->addr_reg, qh->epnum, is_out ? "out" : "in",
+ qh->h_addr_reg, qh->h_port_reg,
+ len);
+
+ musb_ep_select(mbase, epnum);
+
+ /* candidate for DMA? */
+ dma_controller = musb->dma_controller;
+ if (is_dma_capable() && epnum && dma_controller) {
+ dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
+ if (!dma_channel) {
+ dma_channel = dma_controller->channel_alloc(
+ dma_controller, hw_ep, is_out);
+ if (is_out)
+ hw_ep->tx_channel = dma_channel;
+ else
+ hw_ep->rx_channel = dma_channel;
+ }
+ } else
+ dma_channel = NULL;
+
+ /* make sure we clear DMAEnab, autoSet bits from previous run */
+
+ /* OUT/transmit/EP0 or IN/receive? */
+ if (is_out) {
+ u16 csr;
+ u16 int_txe;
+ u16 load_count;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* disable interrupt in case we flush */
+ int_txe = musb_readw(mbase, MUSB_INTRTXE);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+ /* general endpoint setup */
+ if (epnum) {
+ /* ASSERT: TXCSR_DMAENAB was already cleared */
+
+ /* flush all old state, set default */
+ musb_h_tx_flush_fifo(hw_ep);
+ csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_FRCDATATOG
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_TXPKTRDY
+ );
+ csr |= MUSB_TXCSR_MODE;
+
+ if (usb_gettoggle(urb->dev,
+ qh->epnum, 1))
+ csr |= MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr |= MUSB_TXCSR_CLRDATATOG;
+
+ /* twice in case of double packet buffering */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ } else {
+ /* endpoint 0: just flush */
+ musb_writew(epio, MUSB_CSR0,
+ csr | MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0,
+ csr | MUSB_CSR0_FLUSHFIFO);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
+ qh->addr_reg);
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
+ qh->h_addr_reg);
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
+ qh->h_port_reg);
+/* FIXME if !epnum, do the same for RX ... */
+ } else
+ musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint/interval/NAKlimit */
+ if (epnum) {
+ musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
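+ /* with bulk splitting, the high bits of TXMAXP hold one
+ * less than the number of maxpacket chunks the fifo holds
+ */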
+ if (can_bulk_split(musb, qh->type))
+ musb_writew(epio, MUSB_TXMAXP,
+ packet_sz
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
+ else
+ musb_writew(epio, MUSB_TXMAXP,
+ packet_sz);
+ musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
+ } else {
+ musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
+ if (musb->is_multipoint)
+ musb_writeb(epio, MUSB_TYPE0,
+ qh->type_reg);
+ }
+
+ if (can_bulk_split(musb, qh->type))
+ load_count = min((u32) hw_ep->max_packet_sz_tx,
+ len);
+ else
+ load_count = min((u32) packet_sz, len);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (dma_channel) {
+
+ /* clear previous state */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_DMAENAB);
+ csr |= MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR,
+ csr | MUSB_TXCSR_MODE);
+
+ qh->segsize = min(len, dma_channel->max_len);
+
+ if (qh->segsize <= packet_sz)
+ dma_channel->desired_mode = 0;
+ else
+ dma_channel->desired_mode = 1;
+
+ if (dma_channel->desired_mode == 0) {
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE);
+ csr |= (MUSB_TXCSR_DMAENAB);
+ /* against programming guide */
+ } else
+ csr |= (MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE);
+
+ musb_writew(epio, MUSB_TXCSR, csr);
+
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ dma_channel->desired_mode,
+ urb->transfer_dma,
+ qh->segsize);
+ if (dma_ok) {
+ load_count = 0;
+ } else {
+ dma_controller->channel_release(dma_channel);
+ if (is_out)
+ hw_ep->tx_channel = NULL;
+ else
+ hw_ep->rx_channel = NULL;
+ dma_channel = NULL;
+ }
+ }
+#endif
+
+ /* candidate for DMA */
+ if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+
+ /* program endpoint CSRs first, then setup DMA.
+ * assume CPPI setup succeeds.
+ * defer enabling dma.
+ */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_DMAENAB);
+ csr |= MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR,
+ csr | MUSB_TXCSR_MODE);
+
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* TX uses "rndis" mode automatically, but needs help
+ * to identify the zero-length-final-packet case.
+ */
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ (urb->transfer_flags
+ & URB_ZERO_PACKET)
+ == URB_ZERO_PACKET,
+ urb->transfer_dma,
+ qh->segsize);
+ if (dma_ok) {
+ load_count = 0;
+ } else {
+ dma_controller->channel_release(dma_channel);
+ hw_ep->tx_channel = NULL;
+ dma_channel = NULL;
+
+ /* REVISIT there's an error path here that
+ * needs handling: can't do dma, but
+ * there's no pio buffer address...
+ */
+ }
+ }
+
+ if (load_count) {
+ /* ASSERT: TXCSR_DMAENAB was already cleared */
+
+ /* PIO to load FIFO */
+ qh->segsize = load_count;
+ musb_write_fifo(hw_ep, load_count, buf);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_AUTOSET);
+ /* write CSR */
+ csr |= MUSB_TXCSR_MODE;
+
+ if (epnum)
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+
+ /* re-enable interrupt */
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+ /* IN/receive */
+ } else {
+ u16 csr;
+
+ if (hw_ep->rx_reinit) {
+ musb_rx_reinit(musb, qh, hw_ep);
+
+ /* init new state: toggle and NYET, maybe DMA later */
+ if (usb_gettoggle(urb->dev, qh->epnum, 0))
+ csr = MUSB_RXCSR_H_WR_DATATOGGLE
+ | MUSB_RXCSR_H_DATATOGGLE;
+ else
+ csr = 0;
+ if (qh->type == USB_ENDPOINT_XFER_INT)
+ csr |= MUSB_RXCSR_DISNYET;
+
+ } else {
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+ if (csr & (MUSB_RXCSR_RXPKTRDY
+ | MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_REQPKT))
+ ERR("broken !rx_reinit, ep%d csr %04x\n",
+ hw_ep->epnum, csr);
+
+ /* scrub any stale state, leaving toggle alone */
+ csr &= MUSB_RXCSR_DISNYET;
+ }
+
+ /* kick things off */
+
+ if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+ /* candidate for DMA */
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* AUTOREQ is in a DMA register */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+ /* unless caller treats short rx transfers as
+ * errors, we dare not queue multiple transfers.
+ */
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ !(urb->transfer_flags
+ & URB_SHORT_NOT_OK),
+ urb->transfer_dma,
+ qh->segsize);
+ if (!dma_ok) {
+ dma_controller->channel_release(dma_channel);
+ hw_ep->rx_channel = NULL;
+ dma_channel = NULL;
+ } else
+ csr |= MUSB_RXCSR_DMAENAB;
+ }
+
+ csr |= MUSB_RXCSR_H_REQPKT;
+ DBG(7, "RXCSR%d := %04x\n", epnum, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ }
+}
+
+
+/*
+ * Service the default endpoint (ep0) as host.
+ * Return true until it's time to start the status stage.
+ */
+static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+{
+ bool more = false;
+ u8 *fifo_dest = NULL;
+ u16 fifo_count = 0;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ struct musb_qh *qh = hw_ep->in_qh;
+ struct usb_ctrlrequest *request;
+
+ switch (musb->ep0_stage) {
+ case MUSB_EP0_IN:
+ fifo_dest = urb->transfer_buffer + urb->actual_length;
+ fifo_count = min(len, ((u16) (urb->transfer_buffer_length
+ - urb->actual_length)));
+ if (fifo_count < len)
+ urb->status = -EOVERFLOW;
+
+ musb_read_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ if (len < qh->maxpacket) {
+ /* always terminate on short read; it's
+ * rarely reported as an error.
+ */
+ } else if (urb->actual_length <
+ urb->transfer_buffer_length)
+ more = true;
+ break;
+ case MUSB_EP0_START:
+ request = (struct usb_ctrlrequest *) urb->setup_packet;
+
+ if (!request->wLength) {
+ DBG(4, "start no-DATA\n");
+ break;
+ } else if (request->bRequestType & USB_DIR_IN) {
+ DBG(4, "start IN-DATA\n");
+ musb->ep0_stage = MUSB_EP0_IN;
+ more = true;
+ break;
+ } else {
+ DBG(4, "start OUT-DATA\n");
+ musb->ep0_stage = MUSB_EP0_OUT;
+ more = true;
+ }
+ /* FALLTHROUGH */
+ case MUSB_EP0_OUT:
+ fifo_count = min(qh->maxpacket, ((u16)
+ (urb->transfer_buffer_length
+ - urb->actual_length)));
+
+ if (fifo_count) {
+ fifo_dest = (u8 *) (urb->transfer_buffer
+ + urb->actual_length);
+ DBG(3, "Sending %d bytes to %p\n",
+ fifo_count, fifo_dest);
+ musb_write_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ more = true;
+ }
+ break;
+ default:
+ ERR("bogus ep0 stage %d\n", musb->ep0_stage);
+ break;
+ }
+
+ return more;
+}
+
+/*
+ * Handle the default endpoint interrupt as host. Only called in IRQ
+ * context, from the interrupt service routine.
+ *
+ * Context: controller locked, irqs blocked
+ */
+irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+ struct urb *urb;
+ u16 csr, len;
+ int status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ bool complete = false;
+ irqreturn_t retval = IRQ_NONE;
+
+ /* ep0 only has one queue, "in" */
+ urb = next_urb(qh);
+
+ musb_ep_select(mbase, 0);
+ csr = musb_readw(epio, MUSB_CSR0);
+ len = (csr & MUSB_CSR0_RXPKTRDY)
+ ? musb_readb(epio, MUSB_COUNT0)
+ : 0;
+
+ DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+ csr, qh, len, urb, musb->ep0_stage);
+
+ /* if we just did status stage, we are done */
+ if (MUSB_EP0_STATUS == musb->ep0_stage) {
+ retval = IRQ_HANDLED;
+ complete = true;
+ }
+
+ /* prepare status */
+ if (csr & MUSB_CSR0_H_RXSTALL) {
+ DBG(6, "STALLING ENDPOINT\n");
+ status = -EPIPE;
+
+ } else if (csr & MUSB_CSR0_H_ERROR) {
+ DBG(2, "no response, csr0 %04x\n", csr);
+ status = -EPROTO;
+
+ } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
+ DBG(2, "control NAK timeout\n");
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * control transfer, if another one is queued, so that
+ * ep0 is more likely to stay busy.
+ *
+ * if (qh->ring.next != &musb->control), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_writew(epio, MUSB_CSR0, 0);
+ retval = IRQ_HANDLED;
+ }
+
+ if (status) {
+ DBG(6, "aborting\n");
+ retval = IRQ_HANDLED;
+ if (urb)
+ urb->status = status;
+ complete = true;
+
+ /* use the proper sequence to abort the transfer */
+ if (csr & MUSB_CSR0_H_REQPKT) {
+ csr &= ~MUSB_CSR0_H_REQPKT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ } else {
+ csr |= MUSB_CSR0_FLUSHFIFO;
+ musb_writew(epio, MUSB_CSR0, csr);
+ musb_writew(epio, MUSB_CSR0, csr);
+ csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ }
+
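+ /* 0 disables the ep0 NAK timeout */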
+ musb_writeb(epio, MUSB_NAKLIMIT0, 0);
+
+ /* clear it */
+ musb_writew(epio, MUSB_CSR0, 0);
+ }
+
+ if (unlikely(!urb)) {
+ /* stop endpoint since we have no place for its data; this
+ * SHOULD NEVER HAPPEN! */
+ ERR("no URB for end 0\n");
+
+ musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0, 0);
+
+ goto done;
+ }
+
+ if (!complete) {
+ /* call common logic and prepare response */
+ if (musb_h_ep0_continue(musb, len, urb)) {
+ /* more packets required */
+ csr = (MUSB_EP0_IN == musb->ep0_stage)
+ ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
+ } else {
+ /* data transfer complete; perform status phase */
+ if (usb_pipeout(urb->pipe)
+ || !urb->transfer_buffer_length)
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_H_REQPKT;
+ else
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_TXPKTRDY;
+
+ /* flag status stage */
+ musb->ep0_stage = MUSB_EP0_STATUS;
+
+ DBG(5, "ep0 STATUS, csr %04x\n", csr);
+
+ }
+ musb_writew(epio, MUSB_CSR0, csr);
+ retval = IRQ_HANDLED;
+ } else
+ musb->ep0_stage = MUSB_EP0_IDLE;
+
+ /* call completion handler if done */
+ if (complete)
+ musb_advance_schedule(musb, urb, hw_ep, 1);
+done:
+ return retval;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side TX (OUT) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, Program Endpoint
+ - ... which starts DMA to fifo in mode 1 or 0
+
+ DMA Isr (transfer complete) -> TxAvail()
+ - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
+ only in musb_cleanup_urb)
+ - TxPktRdy has to be set in mode 0 or for
+ short packets in mode 1.
+*/
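+
+/* Illustrative sketch only (not driver code): per the note above, a
+ * TxAvail handler must set TxPktRdy after each DMA completion in
+ * mode 0, but in mode 1 only when a final short packet is left:
+ *
+ *	if (mode == 0 || (length % packet_sz) != 0) {
+ *		txcsr = musb_readw(epio, MUSB_TXCSR);
+ *		musb_writew(epio, MUSB_TXCSR, txcsr | MUSB_TXCSR_TXPKTRDY);
+ *	}
+ */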
+
+#endif
+
+/* Service a Tx-Available or dma completion irq for the endpoint */
+void musb_host_tx(struct musb *musb, u8 epnum)
+{
+ int pipe;
+ bool done = false;
+ u16 tx_csr;
+ size_t wLength = 0;
+ u8 *buf = NULL;
+ struct urb *urb;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->out_qh;
+ u32 status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct dma_channel *dma;
+
+ urb = next_urb(qh);
+
+ musb_ep_select(mbase, epnum);
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* with CPPI, DMA sometimes triggers "