Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r--	drivers/usb/musb/musb_host.c | 747
1 file changed, 555 insertions(+), 192 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 0f523d7db57..eb06291a40c 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -39,14 +39,12 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include "musb_core.h"
#include "musb_host.h"
-
/* MUSB HOST status 22-mar-2006
*
* - There's still lots of partial code duplication for fault paths, so
@@ -96,6 +94,11 @@
* of transfers between endpoints, or anything clever.
*/
+struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+ return *(struct musb **) hcd->hcd_priv;
+}
+
static void musb_ep_program(struct musb *musb, u8 epnum,
struct urb *urb, int is_out,
@@ -106,6 +109,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
*/
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
+ struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
u16 csr;
u16 lastcsr = 0;
@@ -114,7 +118,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
csr = musb_readw(epio, MUSB_TXCSR);
while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
if (csr != lastcsr)
- DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+ dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
lastcsr = csr;
csr |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(epio, MUSB_TXCSR, csr);
@@ -240,7 +244,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
len = urb->transfer_buffer_length - urb->actual_length;
}
- DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+ dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
qh, urb, address, qh->epnum,
is_in ? "in" : "out",
({char *s; switch (qh->type) {
@@ -248,7 +252,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
default: s = "-intr"; break;
- }; s; }),
+ } s; }),
epnum, buf + offset, len);
/* Configure endpoint */
@@ -263,13 +267,12 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
switch (qh->type) {
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
- DBG(3, "check whether there's still time for periodic Tx\n");
+ dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
frame = musb_readw(mbase, MUSB_FRAME);
/* FIXME this doesn't implement that scheduling policy ...
* or handle framecounter wrapping
*/
- if ((urb->transfer_flags & URB_ISO_ASAP)
- || (frame >= urb->start_frame)) {
+ if (1) { /* Always assume URB_ISO_ASAP */
/* REVISIT the SOF irq handler shouldn't duplicate
* this code; and we don't init urb->start_frame...
*/
@@ -278,7 +281,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
} else {
qh->frame = urb->start_frame;
/* enable SOF interrupt so we can count down */
- DBG(1, "SOF for %d\n", epnum);
+ dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
@@ -286,7 +289,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
break;
default:
start:
- DBG(4, "Start TX%d %s\n", epnum,
+ dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
hw_ep->tx_channel ? "dma" : "pio");
if (!hw_ep->tx_channel)
@@ -301,21 +304,7 @@ static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
- DBG(({ int level; switch (status) {
- case 0:
- level = 4;
- break;
- /* common/boring faults */
- case -EREMOTEIO:
- case -ESHUTDOWN:
- case -ECONNRESET:
- case -EPIPE:
- level = 3;
- break;
- default:
- level = 2;
- break;
- }; level; }),
+ dev_dbg(musb->controller,
"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
urb, urb->complete, status,
usb_pipedevice(urb->pipe),
@@ -324,9 +313,9 @@ __acquires(musb->lock)
urb->actual_length, urb->transfer_buffer_length
);
- usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+ usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
spin_unlock(&musb->lock);
- usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+ usb_hcd_giveback_urb(musb->hcd, urb, status);
spin_lock(&musb->lock);
}
@@ -388,11 +377,21 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
*/
if (list_empty(&qh->hep->urb_list)) {
struct list_head *head;
+ struct dma_controller *dma = musb->dma_controller;
- if (is_in)
+ if (is_in) {
ep->rx_reinit = 1;
- else
+ if (ep->rx_channel) {
+ dma->channel_release(ep->rx_channel);
+ ep->rx_channel = NULL;
+ }
+ } else {
ep->tx_reinit = 1;
+ if (ep->tx_channel) {
+ dma->channel_release(ep->tx_channel);
+ ep->tx_channel = NULL;
+ }
+ }
/* Clobber old pointers to this qh */
musb_ep_set_qh(ep, is_in, NULL);
@@ -426,7 +425,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
}
if (qh != NULL && qh->is_ready) {
- DBG(4, "... next ep%d %cX urb %p\n",
+ dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
@@ -471,7 +470,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
/* musb_ep_select(mbase, epnum); */
rx_count = musb_readw(epio, MUSB_RXCOUNT);
- DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+ dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
urb->transfer_buffer, qh->offset,
urb->transfer_buffer_length);
@@ -493,7 +492,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
status = -EOVERFLOW;
urb->error_count++;
}
- DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
do_flush = 1;
} else
length = rx_count;
@@ -511,7 +510,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
if (rx_count > length) {
if (urb->status == -EINPROGRESS)
urb->status = -EOVERFLOW;
- DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
do_flush = 1;
} else
length = rx_count;
@@ -628,7 +627,7 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
u16 csr;
u8 mode;
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (length > channel->max_len)
length = channel->max_len;
@@ -637,7 +636,17 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
mode = 1;
csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
/* autoset shouldn't be set in high bandwidth */
- if (qh->hb_mult == 1)
+ /*
+ * Enable Autoset according to table
+ * below
+ * bulk_split hb_mult Autoset_Enable
+ * 0 1 Yes(Normal)
+ * 0 >1 No(High BW ISO)
+ * 1 1 Yes(HS bulk)
+ * 1 >1 Yes(FS bulk)
+ */
+ if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
+ can_bulk_split(hw_ep->musb, qh->type)))
csr |= MUSB_TXCSR_AUTOSET;
} else {
mode = 0;
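
For reference, the Autoset decision encoded in the table above reduces to a small predicate: enable Autoset unless the endpoint is high bandwidth (hb_mult > 1) and not using bulk split, i.e. the high-bandwidth ISO case. A minimal standalone sketch (can_bulk_split() is the driver's own test; it is approximated here by a plain boolean parameter):

#include <stdbool.h>

/*
 * Sketch of the Autoset table from the hunk above.
 * hb_mult == 1            -> Autoset (normal, and HS bulk)
 * hb_mult > 1, bulk split -> Autoset (FS bulk)
 * hb_mult > 1, no split   -> no Autoset (high-bandwidth ISO)
 */
static bool autoset_enable(unsigned int hb_mult, bool bulk_split)
{
	if (hb_mult == 1)
		return true;
	return bulk_split;
}
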
@@ -696,8 +705,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
u16 packet_sz = qh->maxpacket;
+ u8 use_dma = 1;
+ u16 csr;
- DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
"h_addr%02x h_port%02x bytes %d\n",
is_out ? "-->" : "<--",
epnum, urb, urb->dev->speed,
@@ -707,9 +718,17 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
musb_ep_select(mbase, epnum);
+ if (is_out && !len) {
+ use_dma = 0;
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ hw_ep->tx_channel = NULL;
+ }
+
/* candidate for DMA? */
dma_controller = musb->dma_controller;
- if (is_dma_capable() && epnum && dma_controller) {
+ if (use_dma && is_dma_capable() && epnum && dma_controller) {
dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
if (!dma_channel) {
dma_channel = dma_controller->channel_alloc(
@@ -733,13 +752,19 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
csr = musb_readw(epio, MUSB_TXCSR);
/* disable interrupt in case we flush */
- int_txe = musb_readw(mbase, MUSB_INTRTXE);
+ int_txe = musb->intrtxe;
musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
/* general endpoint setup */
if (epnum) {
/* flush all old state, set default */
- musb_h_tx_flush_fifo(hw_ep);
+ /*
+ * We could be flushing valid packets in the
+ * double-buffering case.
+ */
+ if (!hw_ep->tx_double_buffered)
+ musb_h_tx_flush_fifo(hw_ep);
/*
* We must not clear the DMAMODE bit before or in
@@ -756,11 +781,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
);
csr |= MUSB_TXCSR_MODE;
- if (usb_gettoggle(urb->dev, qh->epnum, 1))
- csr |= MUSB_TXCSR_H_WR_DATATOGGLE
- | MUSB_TXCSR_H_DATATOGGLE;
- else
- csr |= MUSB_TXCSR_CLRDATATOG;
+ if (!hw_ep->tx_double_buffered) {
+ if (usb_gettoggle(urb->dev, qh->epnum, 1))
+ csr |= MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr |= MUSB_TXCSR_CLRDATATOG;
+ }
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
@@ -784,13 +811,19 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* protocol/endpoint/interval/NAKlimit */
if (epnum) {
musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
- if (musb->double_buffer_not_ok)
+ if (musb->double_buffer_not_ok) {
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
- else
+ } else if (can_bulk_split(musb, qh->type)) {
+ qh->hb_mult = hw_ep->max_packet_sz_tx
+ / packet_sz;
+ musb_writew(epio, MUSB_TXMAXP, packet_sz
+ | ((qh->hb_mult) - 1) << 11);
+ } else {
musb_writew(epio, MUSB_TXMAXP,
qh->maxpacket |
((qh->hb_mult - 1) << 11));
+ }
musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
} else {
musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
@@ -812,9 +845,28 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
if (load_count) {
/* PIO to load FIFO */
qh->segsize = load_count;
- musb_write_fifo(hw_ep, load_count, buf);
+ if (!buf) {
+ sg_miter_start(&qh->sg_miter, urb->sg, 1,
+ SG_MITER_ATOMIC
+ | SG_MITER_FROM_SG);
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller,
+ "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ goto finish;
+ }
+ buf = qh->sg_miter.addr + urb->sg->offset +
+ urb->actual_length;
+ load_count = min_t(u32, load_count,
+ qh->sg_miter.length);
+ musb_write_fifo(hw_ep, load_count, buf);
+ qh->sg_miter.consumed = load_count;
+ sg_miter_stop(&qh->sg_miter);
+ } else
+ musb_write_fifo(hw_ep, load_count, buf);
}
-
+finish:
/* re-enable interrupt */
musb_writew(mbase, MUSB_INTRTXE, int_txe);
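
The PIO fallback above is a one-shot use of the kernel's sg_miter API: start the iterator, map a single chunk, copy, record consumption, stop. A hedged sketch of that pattern in isolation (the helper name is illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/*
 * Map the first chunk of an sg list atomically, copy out at most
 * max_len bytes, record what was consumed, then unmap. This mirrors
 * the single-packet FIFO load in musb_ep_program() above.
 */
static size_t sg_copy_first_chunk(struct scatterlist *sgl, void *dst,
				  size_t max_len)
{
	struct sg_mapping_iter miter;
	size_t len = 0;

	sg_miter_start(&miter, sgl, 1, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	if (sg_miter_next(&miter)) {
		len = min_t(size_t, max_len, miter.length);
		memcpy(dst, miter.addr, len);
		miter.consumed = len;
	}
	sg_miter_stop(&miter);
	return len;
}
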
@@ -850,42 +902,104 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* kick things off */
if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
- /* candidate for DMA */
- if (dma_channel) {
- dma_channel->actual_len = 0L;
- qh->segsize = len;
-
- /* AUTOREQ is in a DMA register */
- musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
- csr = musb_readw(hw_ep->regs,
- MUSB_RXCSR);
-
- /* unless caller treats short rx transfers as
- * errors, we dare not queue multiple transfers.
- */
- dma_ok = dma_controller->channel_program(
- dma_channel, packet_sz,
- !(urb->transfer_flags
- & URB_SHORT_NOT_OK),
- urb->transfer_dma + offset,
- qh->segsize);
- if (!dma_ok) {
- dma_controller->channel_release(
- dma_channel);
- hw_ep->rx_channel = NULL;
- dma_channel = NULL;
- } else
- csr |= MUSB_RXCSR_DMAENAB;
- }
+ /* Candidate for DMA */
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* AUTOREQ is in a DMA register */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+ /*
+ * Unless caller treats short RX transfers as
+ * errors, we dare not queue multiple transfers.
+ */
+ dma_ok = dma_controller->channel_program(dma_channel,
+ packet_sz, !(urb->transfer_flags &
+ URB_SHORT_NOT_OK),
+ urb->transfer_dma + offset,
+ qh->segsize);
+ if (!dma_ok) {
+ dma_controller->channel_release(dma_channel);
+ hw_ep->rx_channel = dma_channel = NULL;
+ } else
+ csr |= MUSB_RXCSR_DMAENAB;
}
csr |= MUSB_RXCSR_H_REQPKT;
- DBG(7, "RXCSR%d := %04x\n", epnum, csr);
+ dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
}
}
+/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+ int is_in)
+{
+ struct dma_channel *dma;
+ struct urb *urb;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *epio = ep->regs;
+ struct musb_qh *cur_qh, *next_qh;
+ u16 rx_csr, tx_csr;
+
+ musb_ep_select(mbase, ep->epnum);
+ if (is_in) {
+ dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+ /* clear nak timeout bit */
+ rx_csr = musb_readw(epio, MUSB_RXCSR);
+ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+ cur_qh = first_qh(&musb->in_bulk);
+ } else {
+ dma = is_dma_capable() ? ep->tx_channel : NULL;
+
+ /* clear nak timeout bit */
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+ tx_csr |= MUSB_TXCSR_H_WZC_BITS;
+ tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+
+ cur_qh = first_qh(&musb->out_bulk);
+ }
+ if (cur_qh) {
+ urb = next_urb(cur_qh);
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ urb->actual_length += dma->actual_len;
+ dma->actual_len = 0L;
+ }
+ musb_save_toggle(cur_qh, is_in, urb);
+
+ if (is_in) {
+ /* move cur_qh to end of queue */
+ list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+ /* get the next qh from musb->in_bulk */
+ next_qh = first_qh(&musb->in_bulk);
+
+ /* set rx_reinit and schedule the next qh */
+ ep->rx_reinit = 1;
+ } else {
+ /* move cur_qh to end of queue */
+ list_move_tail(&cur_qh->ring, &musb->out_bulk);
+
+ /* get the next qh from musb->out_bulk */
+ next_qh = first_qh(&musb->out_bulk);
+
+ /* set tx_reinit and schedule the next qh */
+ ep->tx_reinit = 1;
+ }
+ musb_start_urb(musb, is_in, next_qh);
+ }
+}
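
The fairness mechanism in musb_bulk_nak_timeout() comes down to a single list rotation: the QH that just NAK-timed out moves to the tail, and the new head is scheduled next. A minimal sketch of that step, assuming only <linux/list.h> (struct qh_like and the helper name are made up for illustration):

#include <linux/list.h>

struct qh_like {
	struct list_head ring;
	/* ... endpoint state ... */
};

/*
 * Rotate the current QH to the tail of its bulk queue and return the
 * new head, which is the next QH to run -- the anti-starvation core
 * of musb_bulk_nak_timeout().
 */
static struct qh_like *rotate_and_pick_next(struct list_head *queue,
					    struct qh_like *cur)
{
	list_move_tail(&cur->ring, queue);
	return list_first_entry(queue, struct qh_like, ring);
}
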
/*
* Service the default endpoint (ep0) as host.
@@ -923,15 +1037,15 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
request = (struct usb_ctrlrequest *) urb->setup_packet;
if (!request->wLength) {
- DBG(4, "start no-DATA\n");
+ dev_dbg(musb->controller, "start no-DATA\n");
break;
} else if (request->bRequestType & USB_DIR_IN) {
- DBG(4, "start IN-DATA\n");
+ dev_dbg(musb->controller, "start IN-DATA\n");
musb->ep0_stage = MUSB_EP0_IN;
more = true;
break;
} else {
- DBG(4, "start OUT-DATA\n");
+ dev_dbg(musb->controller, "start OUT-DATA\n");
musb->ep0_stage = MUSB_EP0_OUT;
more = true;
}
@@ -943,7 +1057,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
if (fifo_count) {
fifo_dest = (u8 *) (urb->transfer_buffer
+ urb->actual_length);
- DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
+ dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
fifo_count,
(fifo_count == 1) ? "" : "s",
fifo_dest);
@@ -988,7 +1102,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
? musb_readb(epio, MUSB_COUNT0)
: 0;
- DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+ dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
csr, qh, len, urb, musb->ep0_stage);
/* if we just did status stage, we are done */
@@ -999,15 +1113,15 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
/* prepare status */
if (csr & MUSB_CSR0_H_RXSTALL) {
- DBG(6, "STALLING ENDPOINT\n");
+ dev_dbg(musb->controller, "STALLING ENDPOINT\n");
status = -EPIPE;
} else if (csr & MUSB_CSR0_H_ERROR) {
- DBG(2, "no response, csr0 %04x\n", csr);
+ dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
status = -EPROTO;
} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
- DBG(2, "control NAK timeout\n");
+ dev_dbg(musb->controller, "control NAK timeout\n");
/* NOTE: this code path would be a good place to PAUSE a
* control transfer, if another one is queued, so that
@@ -1022,7 +1136,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
}
if (status) {
- DBG(6, "aborting\n");
+ dev_dbg(musb->controller, "aborting\n");
retval = IRQ_HANDLED;
if (urb)
urb->status = status;
@@ -1069,10 +1183,13 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_TXPKTRDY;
+ /* disable ping token in status phase */
+ csr |= MUSB_CSR0_H_DIS_PING;
+
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
- DBG(5, "ep0 STATUS, csr %04x\n", csr);
+ dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
}
musb_writew(epio, MUSB_CSR0, csr);
@@ -1126,47 +1243,55 @@ void musb_host_tx(struct musb *musb, u8 epnum)
/* with CPPI, DMA sometimes triggers "extra" irqs */
if (!urb) {
- DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
return;
}
pipe = urb->pipe;
dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
- DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+ dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
dma ? ", dma" : "");
/* check for errors */
if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
/* dma was disabled, fifo flushed */
- DBG(3, "TX end %d stall\n", epnum);
+ dev_dbg(musb->controller, "TX end %d stall\n", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
/* (NON-ISO) dma was disabled, fifo flushed */
- DBG(3, "TX 3strikes on ep=%d\n", epnum);
+ dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
status = -ETIMEDOUT;
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
- DBG(6, "TX end=%d device not responding\n", epnum);
-
- /* NOTE: this code path would be a good place to PAUSE a
- * transfer, if there's some other (nonperiodic) tx urb
- * that could use this fifo. (dma complicates it...)
- * That's already done for bulk RX transfers.
- *
- * if (bulk && qh->ring.next != &musb->out_bulk), then
- * we have a candidate... NAKing is *NOT* an error
- */
- musb_ep_select(mbase, epnum);
- musb_writew(epio, MUSB_TXCSR,
- MUSB_TXCSR_H_WZC_BITS
- | MUSB_TXCSR_TXPKTRDY);
- return;
+ if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
+ && !list_is_singular(&musb->out_bulk)) {
+ dev_dbg(musb->controller,
+ "NAK timeout on TX%d ep\n", epnum);
+ musb_bulk_nak_timeout(musb, hw_ep, 0);
+ } else {
+ dev_dbg(musb->controller,
+ "TX end=%d device not responding\n", epnum);
+ /* NOTE: this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) tx urb
+ * that could use this fifo. (dma complicates it...)
+ * That's already done for bulk RX transfers.
+ *
+ * if (bulk && qh->ring.next != &musb->out_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS
+ | MUSB_TXCSR_TXPKTRDY);
+ }
+ return;
}
+done:
if (status) {
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
@@ -1195,7 +1320,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
/* second cppi case */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
return;
}
@@ -1254,7 +1379,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
* FIFO mode too...
*/
if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
- DBG(2, "DMA complete but packet still in FIFO, "
+ dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
"CSR %04x\n", tx_csr);
return;
}
@@ -1321,7 +1446,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
return;
}
} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
- DBG(1, "not complete, but DMA enabled?\n");
+ dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
return;
}
@@ -1335,10 +1460,39 @@ void musb_host_tx(struct musb *musb, u8 epnum)
if (length > qh->maxpacket)
length = qh->maxpacket;
/* Unmap the buffer so that CPU can use it */
- unmap_urb_for_dma(musb_to_hcd(musb), urb);
- musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
+ usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
+
+ /*
+ * We need to map sg if the transfer_buffer is
+ * NULL.
+ */
+ if (!urb->transfer_buffer)
+ qh->use_sg = true;
+
+ if (qh->use_sg) {
+ /* sg_miter_start is already done in musb_ep_program */
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller, "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ status = -EINVAL;
+ goto done;
+ }
+ urb->transfer_buffer = qh->sg_miter.addr;
+ length = min_t(u32, length, qh->sg_miter.length);
+ musb_write_fifo(hw_ep, length, urb->transfer_buffer);
+ qh->sg_miter.consumed = length;
+ sg_miter_stop(&qh->sg_miter);
+ } else {
+ musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
+ }
+
qh->segsize = length;
+ if (qh->use_sg) {
+ if (offset + length >= urb->transfer_buffer_length)
+ qh->use_sg = false;
+ }
+
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
@@ -1384,50 +1538,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
#endif
-/* Schedule next QH from musb->in_bulk and move the current qh to
- * the end; avoids starvation for other endpoints.
- */
-static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
-{
- struct dma_channel *dma;
- struct urb *urb;
- void __iomem *mbase = musb->mregs;
- void __iomem *epio = ep->regs;
- struct musb_qh *cur_qh, *next_qh;
- u16 rx_csr;
-
- musb_ep_select(mbase, ep->epnum);
- dma = is_dma_capable() ? ep->rx_channel : NULL;
-
- /* clear nak timeout bit */
- rx_csr = musb_readw(epio, MUSB_RXCSR);
- rx_csr |= MUSB_RXCSR_H_WZC_BITS;
- rx_csr &= ~MUSB_RXCSR_DATAERROR;
- musb_writew(epio, MUSB_RXCSR, rx_csr);
-
- cur_qh = first_qh(&musb->in_bulk);
- if (cur_qh) {
- urb = next_urb(cur_qh);
- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- dma->status = MUSB_DMA_STATUS_CORE_ABORT;
- musb->dma_controller->channel_abort(dma);
- urb->actual_length += dma->actual_len;
- dma->actual_len = 0L;
- }
- musb_save_toggle(cur_qh, 1, urb);
-
- /* move cur_qh to end of queue */
- list_move_tail(&cur_qh->ring, &musb->in_bulk);
-
- /* get the next qh from musb->in_bulk */
- next_qh = first_qh(&musb->in_bulk);
-
- /* set rx_reinit and schedule the next qh */
- ep->rx_reinit = 1;
- musb_start_urb(musb, 1, next_qh);
- }
-}
-
/*
* Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
* and high-bandwidth IN transfer cases.
@@ -1446,6 +1556,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
bool done = false;
u32 status;
struct dma_channel *dma;
+ unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
musb_ep_select(mbase, epnum);
@@ -1462,7 +1573,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
* usbtest #11 (unlinks) triggers it regularly, sometimes
* with fifo full. (Only with DMA??)
*/
- DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
+ dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
musb_readw(epio, MUSB_RXCOUNT));
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
return;
@@ -1470,20 +1581,20 @@ void musb_host_rx(struct musb *musb, u8 epnum)
pipe = urb->pipe;
- DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
+ dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
epnum, rx_csr, urb->actual_length,
dma ? dma->actual_len : 0);
/* check for errors, concurrent stall & unlink is not really
* handled yet! */
if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
- DBG(3, "RX end %d STALL\n", epnum);
+ dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
- DBG(3, "end %d RX proto error\n", epnum);
+ dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
status = -EPROTO;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
@@ -1491,7 +1602,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
- DBG(6, "RX end %d NAK timeout\n", epnum);
+ dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
/* NOTE: NAKing is *NOT* an error, so we want to
* continue. Except ... if there's a request for
@@ -1504,7 +1615,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
if (usb_pipebulk(urb->pipe)
&& qh->mux == 1
&& !list_is_singular(&musb->in_bulk)) {
- musb_bulk_rx_nak_timeout(musb, hw_ep);
+ musb_bulk_nak_timeout(musb, hw_ep, 1);
return;
}
musb_ep_select(mbase, epnum);
@@ -1514,12 +1625,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
goto finish;
} else {
- DBG(4, "RX end %d ISO data error\n", epnum);
+ dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
/* packet error reported later */
iso_err = true;
}
} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
- DBG(3, "end %d high bandwidth incomplete ISO packet RX\n",
+ dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
epnum);
status = -EPROTO;
}
@@ -1551,7 +1662,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
-#ifndef CONFIG_USB_INVENTRA_DMA
+#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
if (rx_csr & MUSB_RXCSR_H_REQPKT) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
@@ -1565,7 +1676,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
done = true;
}
- DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+ dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
xfer_len, dma ? ", dma" : "");
rx_csr &= ~MUSB_RXCSR_H_REQPKT;
@@ -1583,7 +1694,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
@@ -1593,13 +1705,33 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* even if there was an error, we did the dma
* for iso_frame_desc->length
*/
- if (d->status != EILSEQ && d->status != -EOVERFLOW)
+ if (d->status != -EILSEQ && d->status != -EOVERFLOW)
d->status = 0;
- if (++qh->iso_idx >= urb->number_of_packets)
+ if (++qh->iso_idx >= urb->number_of_packets) {
done = true;
- else
+ } else {
+#if defined(CONFIG_USB_TI_CPPI41_DMA)
+ struct dma_controller *c;
+ dma_addr_t *buf;
+ u32 length, ret;
+
+ c = musb->dma_controller;
+ buf = (void *)
+ urb->iso_frame_desc[qh->iso_idx].offset
+ + (u32)urb->transfer_dma;
+
+ length =
+ urb->iso_frame_desc[qh->iso_idx].length;
+
+ val |= MUSB_RXCSR_DMAENAB;
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+ ret = c->channel_program(dma, qh->maxpacket,
+ 0, (u32) buf, length);
+#endif
done = false;
+ }
} else {
/* done if urb buffer is full or short packet is recd */
@@ -1615,7 +1747,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
MUSB_RXCSR_H_WZC_BITS | val);
}
- DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
+ dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
done ? "off" : "reset",
musb_readw(epio, MUSB_RXCSR),
musb_readw(epio, MUSB_RXCOUNT));
@@ -1639,7 +1771,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
}
/* we are expecting IN packets */
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
if (dma) {
struct dma_controller *c;
u16 rx_count;
@@ -1648,10 +1781,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
rx_count = musb_readw(epio, MUSB_RXCOUNT);
- DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
+ dev_dbg(musb->controller, "RX%d count %d, buffer 0x%llx len %d/%d\n",
epnum, rx_count,
- urb->transfer_dma
- + urb->actual_length,
+ (unsigned long long) urb->transfer_dma
+ + urb->actual_length,
qh->offset,
urb->transfer_buffer_length);
@@ -1672,7 +1805,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
d_status = -EOVERFLOW;
urb->error_count++;
}
- DBG(2, "** OVERFLOW %d into %d\n",\
+ dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
rx_count, d->length);
length = d->length;
@@ -1750,17 +1883,54 @@ void musb_host_rx(struct musb *musb, u8 epnum)
c->channel_release(dma);
hw_ep->rx_channel = NULL;
dma = NULL;
- /* REVISIT reset CSR */
+ val = musb_readw(epio, MUSB_RXCSR);
+ val &= ~(MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR);
+ musb_writew(epio, MUSB_RXCSR, val);
}
}
#endif /* Mentor DMA */
if (!dma) {
+ unsigned int received_len;
+
/* Unmap the buffer so that CPU can use it */
- unmap_urb_for_dma(musb_to_hcd(musb), urb);
- done = musb_host_packet_rx(musb, urb,
- epnum, iso_err);
- DBG(6, "read %spacket\n", done ? "last " : "");
+ usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
+
+ /*
+ * We need to map sg if the transfer_buffer is
+ * NULL.
+ */
+ if (!urb->transfer_buffer) {
+ qh->use_sg = true;
+ sg_miter_start(&qh->sg_miter, urb->sg, 1,
+ sg_flags);
+ }
+
+ if (qh->use_sg) {
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller, "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ status = -EINVAL;
+ done = true;
+ goto finish;
+ }
+ urb->transfer_buffer = qh->sg_miter.addr;
+ received_len = urb->actual_length;
+ qh->offset = 0x0;
+ done = musb_host_packet_rx(musb, urb, epnum,
+ iso_err);
+ /* Calculate the number of bytes received */
+ received_len = urb->actual_length -
+ received_len;
+ qh->sg_miter.consumed = received_len;
+ sg_miter_stop(&qh->sg_miter);
+ } else {
+ done = musb_host_packet_rx(musb, urb,
+ epnum, iso_err);
+ }
+ dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
}
}
@@ -1768,6 +1938,9 @@ finish:
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (done) {
+ if (qh->use_sg)
+ qh->use_sg = false;
+
if (urb->status == -EINPROGRESS)
urb->status = status;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
@@ -1863,14 +2036,14 @@ static int musb_schedule(
else
head = &musb->out_bulk;
- /* Enable bulk RX NAK timeout scheme when bulk requests are
- * multiplexed. This scheme doen't work in high speed to full
+ /* Enable bulk RX/TX NAK timeout scheme when bulk requests are
+ * multiplexed. This scheme does not work in high speed to full
* speed scenario as NAK interrupts are not coming from a
* full speed device connected to a high speed device.
* NAK timeout interval is 8 (128 uframe or 16ms) for HS and
* 4 (8 frame or 8ms) for FS device.
*/
- if (is_in && qh->dev)
+ if (qh->dev)
qh->intv_reg =
(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
goto success;
@@ -1881,7 +2054,7 @@ static int musb_schedule(
idle = 1;
qh->mux = 0;
hw_ep = musb->endpoints + best_end;
- DBG(4, "qh %p periodic slot %d\n", qh, best_end);
+ dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
if (head) {
idle = list_empty(head);
@@ -1950,7 +2123,7 @@ static int musb_urb_enqueue(
INIT_LIST_HEAD(&qh->ring);
qh->is_ready = 1;
- qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+ qh->maxpacket = usb_endpoint_maxp(epd);
qh->type = usb_endpoint_type(epd);
/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
@@ -2049,7 +2222,7 @@ static int musb_urb_enqueue(
* we only have work to do in the former case.
*/
spin_lock_irqsave(&musb->lock, flags);
- if (hep->hcpriv) {
+ if (hep->hcpriv || !next_urb(qh)) {
/* some concurrent activity submitted another urb to hep...
* odd, rare, error prone, but legal.
*/
@@ -2087,6 +2260,7 @@ done:
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
struct musb_hw_ep *ep = qh->hw_ep;
+ struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
unsigned hw_end = ep->epnum;
void __iomem *regs = ep->musb->mregs;
@@ -2102,7 +2276,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
dma = is_in ? ep->rx_channel : ep->tx_channel;
if (dma) {
status = ep->musb->dma_controller->channel_abort(dma);
- DBG(status ? 1 : 3,
+ dev_dbg(musb->controller,
"abort %cX%d DMA for urb %p --> %d\n",
is_in ? 'R' : 'T', ep->epnum,
urb, status);
@@ -2111,7 +2285,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
}
/* turn off DMA requests, discard state, stop polling ... */
- if (is_in) {
+ if (ep->epnum && is_in) {
/* giveback saves bulk toggle */
csr = musb_h_flush_rxfifo(ep, 0);
@@ -2149,7 +2323,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
int is_in = usb_pipein(urb->pipe);
int ret;
- DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
+ dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
is_in ? "in" : "out");
@@ -2283,6 +2457,8 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
struct musb *musb = hcd_to_musb(hcd);
u8 devctl;
+ musb_port_suspend(musb, true);
+
if (!is_host_active(musb))
return 0;
@@ -2304,7 +2480,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
if (musb->is_active) {
WARNING("trying to suspend as %s while active\n",
- otg_state_string(musb));
+ usb_otg_state_string(musb->xceiv->state));
return -EBUSY;
} else
return 0;
@@ -2312,14 +2488,130 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
static int musb_bus_resume(struct usb_hcd *hcd)
{
- /* resuming child port does the work */
+ struct musb *musb = hcd_to_musb(hcd);
+
+ if (musb->config &&
+ musb->config->host_port_deassert_reset_at_resume)
+ musb_port_reset(musb, false);
+
+ return 0;
+}
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+
+#define MUSB_USB_DMA_ALIGN 4
+
+struct musb_temp_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[0];
+};
+
+static void musb_free_temp_buffer(struct urb *urb)
+{
+ enum dma_data_direction dir;
+ struct musb_temp_buffer *temp;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
+ data);
+
+ if (dir == DMA_FROM_DEVICE) {
+ memcpy(temp->old_xfer_buffer, temp->data,
+ urb->transfer_buffer_length);
+ }
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ enum dma_data_direction dir;
+ struct musb_temp_buffer *temp;
+ void *kmalloc_ptr;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg ||
+ urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
+ return 0;
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+ /* Position our struct temp_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
+
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (dir == DMA_TO_DEVICE)
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
return 0;
}
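
musb_alloc_temp_buffer() works by over-allocating and sliding the header forward to the next aligned address; data[] then stays aligned because the header itself is a multiple of 4 bytes. A userspace sketch of the same arithmetic, with ALIGN_UP standing in for the kernel's PTR_ALIGN (all names are illustrative):

#include <stdint.h>
#include <stdlib.h>

#define DMA_ALIGN	4
#define ALIGN_UP(x, a)	(((uintptr_t)(x) + ((a) - 1)) & ~(uintptr_t)((a) - 1))

struct temp_buffer {
	void *raw;		/* original allocation, kept for free() */
	unsigned char data[];	/* lands on a DMA_ALIGN boundary */
};

static struct temp_buffer *alloc_aligned(size_t len)
{
	/* pad by header size plus worst-case alignment slack */
	void *raw = malloc(sizeof(struct temp_buffer) + DMA_ALIGN - 1 + len);
	struct temp_buffer *t;

	if (!raw)
		return NULL;
	t = (struct temp_buffer *)ALIGN_UP(raw, DMA_ALIGN);
	t->raw = raw;
	return t;
}
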
-const struct hc_driver musb_hc_driver = {
+static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ int ret;
+
+ /*
+ * The DMA engine in RTL1.8 and above cannot handle
+ * DMA addresses that are not aligned to a 4 byte boundary.
+ * For such engines we implement the (un)map_urb_for_dma hooks.
+ * Do not use these hooks for RTL < 1.8.
+ */
+ if (musb->hwvers < MUSB_HWVERS_1800)
+ return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+
+ ret = musb_alloc_temp_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ musb_free_temp_buffer(urb);
+
+ return ret;
+}
+
+static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+
+ /* Do not use this hook for RTL<1.8 (see description above) */
+ if (musb->hwvers < MUSB_HWVERS_1800)
+ return;
+
+ musb_free_temp_buffer(urb);
+}
+#endif /* !CONFIG_MUSB_PIO_ONLY */
+
+static const struct hc_driver musb_hc_driver = {
.description = "musb-hcd",
.product_desc = "MUSB HDRC host driver",
- .hcd_priv_size = sizeof(struct musb),
+ .hcd_priv_size = sizeof(struct musb *),
.flags = HCD_USB2 | HCD_MEMORY,
/* not using irq handler or reset hooks from usbcore, since
@@ -2335,6 +2627,11 @@ const struct hc_driver musb_hc_driver = {
.urb_dequeue = musb_urb_dequeue,
.endpoint_disable = musb_h_disable,
+#ifndef CONFIG_MUSB_PIO_ONLY
+ .map_urb_for_dma = musb_map_urb_for_dma,
+ .unmap_urb_for_dma = musb_unmap_urb_for_dma,
+#endif
+
.hub_status_data = musb_hub_status_data,
.hub_control = musb_hub_control,
.bus_suspend = musb_bus_suspend,
@@ -2342,3 +2639,69 @@ const struct hc_driver musb_hc_driver = {
/* .start_port_reset = NULL, */
/* .hub_irq_enable = NULL, */
};
+
+int musb_host_alloc(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+
+ /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+ musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
+ if (!musb->hcd)
+ return -EINVAL;
+
+ *musb->hcd->hcd_priv = (unsigned long) musb;
+ musb->hcd->self.uses_pio_for_control = 1;
+ musb->hcd->uses_new_polling = 1;
+ musb->hcd->has_tt = 1;
+
+ return 0;
+}
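
Since hcd_priv_size is now sizeof(struct musb *), the hcd private area stores only a pointer, and hcd_to_musb() reads it back. A standalone sketch of that store/load round trip (it assumes sizeof(unsigned long) == sizeof(void *), as the driver does):

#include <assert.h>

struct musb_like { int id; };

/* stand-in for hcd->hcd_priv: one unsigned long slot for a pointer */
static unsigned long hcd_priv[1];

static void store_musb(struct musb_like *m)
{
	hcd_priv[0] = (unsigned long)m;		/* as in musb_host_alloc() */
}

static struct musb_like *load_musb(void)
{
	return *(struct musb_like **)hcd_priv;	/* as in hcd_to_musb() */
}

int main(void)
{
	struct musb_like m = { 42 };

	store_musb(&m);
	assert(load_musb() == &m);
	return 0;
}
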
+
+void musb_host_cleanup(struct musb *musb)
+{
+ if (musb->port_mode == MUSB_PORT_MODE_GADGET)
+ return;
+ usb_remove_hcd(musb->hcd);
+ musb->hcd = NULL;
+}
+
+void musb_host_free(struct musb *musb)
+{
+ usb_put_hcd(musb->hcd);
+}
+
+int musb_host_setup(struct musb *musb, int power_budget)
+{
+ int ret;
+ struct usb_hcd *hcd = musb->hcd;
+
+ MUSB_HST_MODE(musb);
+ musb->xceiv->otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+
+ otg_set_host(musb->xceiv->otg, &hcd->self);
+ hcd->self.otg_port = 1;
+ musb->xceiv->otg->host = &hcd->self;
+ hcd->power_budget = 2 * (power_budget ? : 250);
+
+ ret = usb_add_hcd(hcd, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ device_wakeup_enable(hcd->self.controller);
+ return 0;
+}
+
+void musb_host_resume_root_hub(struct musb *musb)
+{
+ usb_hcd_resume_root_hub(musb->hcd);
+}
+
+void musb_host_poke_root_hub(struct musb *musb)
+{
+ MUSB_HST_MODE(musb);
+ if (musb->hcd->status_urb)
+ usb_hcd_poll_rh_status(musb->hcd);
+ else
+ usb_hcd_resume_root_hub(musb->hcd);
+}