Diffstat (limited to 'drivers/usb/musb/musb_gadget.c')
| -rw-r--r-- | drivers/usb/musb/musb_gadget.c | 519 |
1 file changed, 220 insertions, 299 deletions
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index ac3d2eec20f..d4aa779339f 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -46,48 +46,6 @@ #include "musb_core.h" -/* MUSB PERIPHERAL status 3-mar-2006: - * - * - EP0 seems solid. It passes both USBCV and usbtest control cases. - * Minor glitches: - * - * + remote wakeup to Linux hosts work, but saw USBCV failures; - * in one test run (operator error?) - * + endpoint halt tests -- in both usbtest and usbcv -- seem - * to break when dma is enabled ... is something wrongly - * clearing SENDSTALL? - * - * - Mass storage behaved ok when last tested. Network traffic patterns - * (with lots of short transfers etc) need retesting; they turn up the - * worst cases of the DMA, since short packets are typical but are not - * required. - * - * - TX/IN - * + both pio and dma behave in with network and g_zero tests - * + no cppi throughput issues other than no-hw-queueing - * + failed with FLAT_REG (DaVinci) - * + seems to behave with double buffering, PIO -and- CPPI - * + with gadgetfs + AIO, requests got lost? - * - * - RX/OUT - * + both pio and dma behave in with network and g_zero tests - * + dma is slow in typical case (short_not_ok is clear) - * + double buffering ok with PIO - * + double buffering *FAILS* with CPPI, wrong data bytes sometimes - * + request lossage observed with gadgetfs - * - * - ISO not tested ... might work, but only weakly isochronous - * - * - Gadget driver disabling of softconnect during bind() is ignored; so - * drivers can't hold off host requests until userspace is ready. - * (Workaround: they can turn it off later.) - * - * - PORTABILITY (assumes PIO works): - * + DaVinci, basically works with cppi dma - * + OMAP 2430, ditto with mentor dma - * + TUSB 6010, platform-specific dma in the works - */ - /* ----------------------------------------------------------------------- */ #define is_buffer_mapped(req) (is_dma_capable() && \ @@ -118,13 +76,21 @@ static inline void map_dma_buffer(struct musb_request *request, return; if (request->request.dma == DMA_ADDR_INVALID) { - request->request.dma = dma_map_single( + dma_addr_t dma_addr; + int ret; + + dma_addr = dma_map_single( musb->controller, request->request.buf, request->request.length, request->tx ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); + ret = dma_mapping_error(musb->controller, dma_addr); + if (ret) + return; + + request->request.dma = dma_addr; request->map_state = MUSB_MAPPED; } else { dma_sync_single_for_device(musb->controller, @@ -141,7 +107,9 @@ static inline void map_dma_buffer(struct musb_request *request, static inline void unmap_dma_buffer(struct musb_request *request, struct musb *musb) { - if (!is_buffer_mapped(request)) + struct musb_ep *musb_ep = request->ep; + + if (!is_buffer_mapped(request) || !musb_ep->dma) return; if (request->request.dma == DMA_ADDR_INVALID) { @@ -195,7 +163,10 @@ __acquires(ep->musb->lock) ep->busy = 1; spin_unlock(&musb->lock); - unmap_dma_buffer(req, musb); + + if (!dma_mapping_error(&musb->g.dev, request->dma)) + unmap_dma_buffer(req, musb); + if (request->status == 0) dev_dbg(musb->controller, "%s done request %p, %d/%d\n", ep->end_point.name, request, @@ -275,41 +246,6 @@ static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) return ep->packet_sz; } - -#ifdef CONFIG_USB_INVENTRA_DMA - -/* Peripheral tx (IN) using Mentor DMA works as follows: - Only mode 0 is used for transfers <= wPktSize, - mode 1 is used for larger transfers, - - One of the following happens: - - Host sends IN token which causes an endpoint interrupt - -> TxAvail - -> if DMA is currently busy, exit. - -> if queue is non-empty, txstate(). - - - Request is queued by the gadget driver. - -> if queue was previously empty, txstate() - - txstate() - -> start - /\ -> setup DMA - | (data is transferred to the FIFO, then sent out when - | IN token(s) are recd from Host. - | -> DMA interrupt on completion - | calls TxAvail. - | -> stop DMA, ~DMAENAB, - | -> set TxPktRdy for last short pkt or zlp - | -> Complete Request - | -> Continue next request (call txstate) - |___________________________________| - - * Non-Mentor DMA engines can of course work differently, such as by - * upleveling from irq-per-packet to irq-per-buffer. - */ - -#endif - /* * An endpoint is transmitting data. This can be called either from * the IRQ routine or from ep.queue() to kickstart a request on an @@ -328,6 +264,13 @@ static void txstate(struct musb *musb, struct musb_request *req) musb_ep = req->ep; + /* Check if EP is disabled */ + if (!musb_ep->desc) { + dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", + musb_ep->end_point.name); + return; + } + /* we shouldn't get here while DMA is active ... but we do ... 
*/ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { dev_dbg(musb->controller, "dma pending...\n"); @@ -366,7 +309,7 @@ static void txstate(struct musb *musb, struct musb_request *req) request_size = min_t(size_t, request->length - request->actual, musb_ep->dma->max_len); - use_dma = (request->dma != DMA_ADDR_INVALID); + use_dma = (request->dma != DMA_ADDR_INVALID && request_size); /* MUSB_TXCSR_P_ISO is still set correctly */ @@ -401,7 +344,19 @@ static void txstate(struct musb *musb, struct musb_request *req) csr |= (MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); - if (!musb_ep->hb_mult) + /* + * Enable Autoset according to table + * below + * bulk_split hb_mult Autoset_Enable + * 0 0 Yes(Normal) + * 0 >0 No(High BW ISO) + * 1 0 Yes(HS bulk) + * 1 >0 Yes(FS bulk) + */ + if (!musb_ep->hb_mult || + (musb_ep->hb_mult && + can_bulk_split(musb, + musb_ep->type))) csr |= MUSB_TXCSR_AUTOSET; } csr &= ~MUSB_TXCSR_P_UNDERRUN; @@ -410,47 +365,49 @@ static void txstate(struct musb *musb, struct musb_request *req) } } -#elif defined(CONFIG_USB_TI_CPPI_DMA) - /* program endpoint CSR first, then setup DMA */ - csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); - csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | - MUSB_TXCSR_MODE; - musb_writew(epio, MUSB_TXCSR, - (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) - | csr); - - /* ensure writebuffer is empty */ - csr = musb_readw(epio, MUSB_TXCSR); - - /* NOTE host side sets DMAENAB later than this; both are - * OK since the transfer dma glue (between CPPI and Mentor - * fifos) just tells CPPI it could start. Data only moves - * to the USB TX fifo when both fifos are ready. - */ - - /* "mode" is irrelevant here; handle terminating ZLPs like - * PIO does, since the hardware RNDIS mode seems unreliable - * except for the last-packet-is-already-short case. - */ - use_dma = use_dma && c->channel_program( - musb_ep->dma, musb_ep->packet_sz, - 0, - request->dma + request->actual, - request_size); - if (!use_dma) { - c->channel_release(musb_ep->dma); - musb_ep->dma = NULL; - csr &= ~MUSB_TXCSR_DMAENAB; - musb_writew(epio, MUSB_TXCSR, csr); - /* invariant: prequest->buf is non-null */ - } -#elif defined(CONFIG_USB_TUSB_OMAP_DMA) - use_dma = use_dma && c->channel_program( - musb_ep->dma, musb_ep->packet_sz, - request->zero, - request->dma + request->actual, - request_size); #endif + if (is_cppi_enabled()) { + /* program endpoint CSR first, then setup DMA */ + csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); + csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | + MUSB_TXCSR_MODE; + musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS & + ~MUSB_TXCSR_P_UNDERRUN) | csr); + + /* ensure writebuffer is empty */ + csr = musb_readw(epio, MUSB_TXCSR); + + /* + * NOTE host side sets DMAENAB later than this; both are + * OK since the transfer dma glue (between CPPI and + * Mentor fifos) just tells CPPI it could start. Data + * only moves to the USB TX fifo when both fifos are + * ready. + */ + /* + * "mode" is irrelevant here; handle terminating ZLPs + * like PIO does, since the hardware RNDIS mode seems + * unreliable except for the + * last-packet-is-already-short case. 
+ */ + use_dma = use_dma && c->channel_program( + musb_ep->dma, musb_ep->packet_sz, + 0, + request->dma + request->actual, + request_size); + if (!use_dma) { + c->channel_release(musb_ep->dma); + musb_ep->dma = NULL; + csr &= ~MUSB_TXCSR_DMAENAB; + musb_writew(epio, MUSB_TXCSR, csr); + /* invariant: prequest->buf is non-null */ + } + } else if (tusb_dma_omap()) + use_dma = use_dma && c->channel_program( + musb_ep->dma, musb_ep->packet_sz, + request->zero, + request->dma + request->actual, + request_size); } #endif @@ -574,6 +531,15 @@ void musb_g_tx(struct musb *musb, u8 epnum) if (request->actual == request->length) { musb_g_giveback(musb_ep, request, 0); + /* + * In the giveback function the MUSB lock is + * released and acquired after sometime. During + * this time period the INDEX register could get + * changed by the gadget_queue function especially + * on SMP systems. Reselect the INDEX to be sure + * we are reading/modifying the right registers + */ + musb_ep_select(mbase, epnum); req = musb_ep->desc ? next_request(musb_ep) : NULL; if (!req) { dev_dbg(musb->controller, "%s idle now\n", @@ -588,37 +554,6 @@ void musb_g_tx(struct musb *musb, u8 epnum) /* ------------------------------------------------------------ */ -#ifdef CONFIG_USB_INVENTRA_DMA - -/* Peripheral rx (OUT) using Mentor DMA works as follows: - - Only mode 0 is used. - - - Request is queued by the gadget class driver. - -> if queue was previously empty, rxstate() - - - Host sends OUT token which causes an endpoint interrupt - /\ -> RxReady - | -> if request queued, call rxstate - | /\ -> setup DMA - | | -> DMA interrupt on completion - | | -> RxReady - | | -> stop DMA - | | -> ack the read - | | -> if data recd = max expected - | | by the request, or host - | | sent a short packet, - | | complete the request, - | | and start the next one. - | |_____________________________________| - | else just wait for the host - | to send the next OUT token. - |__________________________________________________| - - * Non-Mentor DMA engines can of course work differently. - */ - -#endif - /* * Context: controller locked, IRQs blocked, endpoint selected */ @@ -628,8 +563,8 @@ static void rxstate(struct musb *musb, struct musb_request *req) struct usb_request *request = &req->request; struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; - unsigned fifo_count = 0; - u16 len; + unsigned len = 0; + u16 fifo_count; u16 csr = musb_readw(epio, MUSB_RXCSR); struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; u8 use_mode_1; @@ -639,7 +574,14 @@ static void rxstate(struct musb *musb, struct musb_request *req) else musb_ep = &hw_ep->ep_out; - len = musb_ep->packet_sz; + fifo_count = musb_ep->packet_sz; + + /* Check if EP is disabled */ + if (!musb_ep->desc) { + dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", + musb_ep->end_point.name); + return; + } /* We shouldn't get here while DMA is active, but we do... 
*/ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { @@ -681,7 +623,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) } if (csr & MUSB_RXCSR_RXPKTRDY) { - len = musb_readw(epio, MUSB_RXCOUNT); + fifo_count = musb_readw(epio, MUSB_RXCOUNT); /* * Enable Mode 1 on RX transfers only when short_not_ok flag @@ -689,7 +631,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) * file_storage and f_mass_storage drivers */ - if (request->short_not_ok && len == musb_ep->packet_sz) + if (request->short_not_ok && fifo_count == musb_ep->packet_sz) use_mode_1 = 1; else use_mode_1 = 0; @@ -700,6 +642,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) struct dma_controller *c; struct dma_channel *channel; int use_dma = 0; + unsigned int transfer_size; c = musb->dma_controller; channel = musb_ep->dma; @@ -741,35 +684,31 @@ static void rxstate(struct musb *musb, struct musb_request *req) csr | MUSB_RXCSR_DMAMODE); musb_writew(epio, MUSB_RXCSR, csr); + transfer_size = min_t(unsigned int, + request->length - + request->actual, + channel->max_len); + musb_ep->dma->desired_mode = 1; } else { if (!musb_ep->hb_mult && musb_ep->hw_ep->rx_double_buffered) csr |= MUSB_RXCSR_AUTOCLEAR; csr |= MUSB_RXCSR_DMAENAB; musb_writew(epio, MUSB_RXCSR, csr); - } - if (request->actual < request->length) { - int transfer_size = 0; - if (use_mode_1) { - transfer_size = min(request->length - request->actual, - channel->max_len); - musb_ep->dma->desired_mode = 1; - } else { - transfer_size = min(request->length - request->actual, - (unsigned)len); - musb_ep->dma->desired_mode = 0; - } - - use_dma = c->channel_program( - channel, - musb_ep->packet_sz, - channel->desired_mode, - request->dma - + request->actual, - transfer_size); + transfer_size = min(request->length - request->actual, + (unsigned)fifo_count); + musb_ep->dma->desired_mode = 0; } + use_dma = c->channel_program( + channel, + musb_ep->packet_sz, + channel->desired_mode, + request->dma + + request->actual, + transfer_size); + if (use_dma) return; } @@ -779,22 +718,24 @@ static void rxstate(struct musb *musb, struct musb_request *req) struct dma_controller *c; struct dma_channel *channel; - int transfer_size = 0; + unsigned int transfer_size = 0; c = musb->dma_controller; channel = musb_ep->dma; /* In case first packet is short */ - if (len < musb_ep->packet_sz) - transfer_size = len; + if (fifo_count < musb_ep->packet_sz) + transfer_size = fifo_count; else if (request->short_not_ok) - transfer_size = min(request->length - + transfer_size = min_t(unsigned int, + request->length - request->actual, channel->max_len); else - transfer_size = min(request->length - + transfer_size = min_t(unsigned int, + request->length - request->actual, - (unsigned)len); + (unsigned)fifo_count); csr &= ~MUSB_RXCSR_DMAMODE; csr |= (MUSB_RXCSR_DMAENAB | @@ -822,10 +763,10 @@ static void rxstate(struct musb *musb, struct musb_request *req) } #endif /* Mentor's DMA */ - fifo_count = request->length - request->actual; + len = request->length - request->actual; dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", musb_ep->end_point.name, - len, fifo_count, + fifo_count, len, musb_ep->packet_sz); fifo_count = min_t(unsigned, len, fifo_count); @@ -878,7 +819,8 @@ static void rxstate(struct musb *musb, struct musb_request *req) } /* reach the end or short packet detected */ - if (request->actual == request->length || len < musb_ep->packet_sz) + if (request->actual == request->length || + fifo_count < 
musb_ep->packet_sz) musb_g_giveback(musb_ep, request, 0); } @@ -983,6 +925,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) } #endif musb_g_giveback(musb_ep, request, 0); + /* + * In the giveback function the MUSB lock is + * released and acquired after sometime. During + * this time period the INDEX register could get + * changed by the gadget_queue function especially + * on SMP systems. Reselect the INDEX to be sure + * we are reading/modifying the right registers + */ + musb_ep_select(mbase, epnum); req = next_request(musb_ep); if (!req) @@ -1061,7 +1012,6 @@ static int musb_gadget_enable(struct usb_ep *ep, */ musb_ep_select(mbase, epnum); if (usb_endpoint_dir_in(desc)) { - u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); if (hw_ep->is_shared_fifo) musb_ep->is_in = 1; @@ -1073,8 +1023,8 @@ static int musb_gadget_enable(struct usb_ep *ep, goto fail; } - int_txe |= (1 << epnum); - musb_writew(mbase, MUSB_INTRTXE, int_txe); + musb->intrtxe |= (1 << epnum); + musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); /* REVISIT if can_bulk_split(), use by updating "tmp"; * likewise high bandwidth periodic tx @@ -1082,11 +1032,15 @@ static int musb_gadget_enable(struct usb_ep *ep, /* Set TXMAXP with the FIFO size of the endpoint * to disable double buffering mode. */ - if (musb->double_buffer_not_ok) + if (musb->double_buffer_not_ok) { musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); - else + } else { + if (can_bulk_split(musb, musb_ep->type)) + musb_ep->hb_mult = (hw_ep->max_packet_sz_tx / + musb_ep->packet_sz) - 1; musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); + } csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; if (musb_readw(regs, MUSB_TXCSR) @@ -1101,7 +1055,6 @@ static int musb_gadget_enable(struct usb_ep *ep, musb_writew(regs, MUSB_TXCSR, csr); } else { - u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); if (hw_ep->is_shared_fifo) musb_ep->is_in = 0; @@ -1113,8 +1066,8 @@ static int musb_gadget_enable(struct usb_ep *ep, goto fail; } - int_rxe |= (1 << epnum); - musb_writew(mbase, MUSB_INTRRXE, int_rxe); + musb->intrrxe |= (1 << epnum); + musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe); /* REVISIT if can_bulk_combine() use by updating "tmp" * likewise high bandwidth periodic rx @@ -1168,7 +1121,7 @@ static int musb_gadget_enable(struct usb_ep *ep, case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; case USB_ENDPOINT_XFER_INT: s = "int"; break; default: s = "iso"; break; - }; s; }), + } s; }), musb_ep->is_in ? "IN" : "OUT", musb_ep->dma ? 
"dma, " : "", musb_ep->packet_sz); @@ -1202,18 +1155,17 @@ static int musb_gadget_disable(struct usb_ep *ep) /* zero the endpoint sizes */ if (musb_ep->is_in) { - u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); - int_txe &= ~(1 << epnum); - musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); + musb->intrtxe &= ~(1 << epnum); + musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe); musb_writew(epio, MUSB_TXMAXP, 0); } else { - u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); - int_rxe &= ~(1 << epnum); - musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); + musb->intrrxe &= ~(1 << epnum); + musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe); musb_writew(epio, MUSB_RXMAXP, 0); } musb_ep->desc = NULL; + musb_ep->end_point.desc = NULL; /* abort all pending DMA and requests */ nuke(musb_ep, -ESHUTDOWN); @@ -1324,7 +1276,8 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", req, ep->name, "disabled"); status = -ESHUTDOWN; - goto cleanup; + unmap_dma_buffer(request, musb); + goto unlock; } /* add request to the list */ @@ -1334,7 +1287,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, if (!musb_ep->busy && &request->list == musb_ep->req_list.next) musb_ep_restart(musb, request); -cleanup: +unlock: spin_unlock_irqrestore(&musb->lock, lockflags); return status; } @@ -1524,7 +1477,7 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep) void __iomem *epio = musb->endpoints[epnum].regs; void __iomem *mbase; unsigned long flags; - u16 csr, int_txe; + u16 csr; mbase = musb->mregs; @@ -1532,8 +1485,7 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep) musb_ep_select(mbase, (u8) epnum); /* disable interrupts */ - int_txe = musb_readw(mbase, MUSB_INTRTXE); - musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); + musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum)); if (musb_ep->is_in) { csr = musb_readw(epio, MUSB_TXCSR); @@ -1557,7 +1509,7 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep) } /* re-enable interrupt */ - musb_writew(mbase, MUSB_INTRTXE, int_txe); + musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); spin_unlock_irqrestore(&musb->lock, flags); } @@ -1624,7 +1576,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) } spin_unlock_irqrestore(&musb->lock, flags); - otg_start_srp(musb->xceiv); + otg_start_srp(musb->xceiv->otg); spin_lock_irqsave(&musb->lock, flags); /* Block idling for at least 1s */ @@ -1635,7 +1587,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) goto done; default: dev_dbg(musb->controller, "Unhandled wake: %s\n", - otg_state_string(musb->xceiv->state)); + usb_otg_state_string(musb->xceiv->state)); goto done; } @@ -1703,7 +1655,7 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) if (!musb->xceiv->set_power) return -EOPNOTSUPP; - return otg_set_power(musb->xceiv, mA); + return usb_phy_set_power(musb->xceiv, mA); } static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) @@ -1755,14 +1707,7 @@ static const struct usb_gadget_ops musb_gadget_operations = { * all peripheral ports are external... 
*/ -static void musb_gadget_release(struct device *dev) -{ - /* kref_put(WHAT) */ - dev_dbg(dev, "%s\n", __func__); -} - - -static void __init +static void init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) { struct musb_hw_ep *hw_ep = musb->endpoints + epnum; @@ -1782,14 +1727,14 @@ init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) ep->end_point.name = ep->name; INIT_LIST_HEAD(&ep->end_point.ep_list); if (!epnum) { - ep->end_point.maxpacket = 64; + usb_ep_set_maxpacket_limit(&ep->end_point, 64); ep->end_point.ops = &musb_g_ep0_ops; musb->g.ep0 = &ep->end_point; } else { if (is_in) - ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; + usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx); else - ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; + usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx); ep->end_point.ops = &musb_ep_ops; list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); } @@ -1799,7 +1744,7 @@ init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) * Initialize the endpoints exposed to peripheral drivers, with backlinks * to the rest of the driver state. */ -static inline void __init musb_g_init_endpoints(struct musb *musb) +static inline void musb_g_init_endpoints(struct musb *musb) { u8 epnum; struct musb_hw_ep *hw_ep; @@ -1832,7 +1777,7 @@ static inline void __init musb_g_init_endpoints(struct musb *musb) /* called once during driver setup to initialize and link into * the driver model; memory is zeroed. */ -int __init musb_gadget_setup(struct musb *musb) +int musb_gadget_setup(struct musb *musb) { int status; @@ -1845,26 +1790,23 @@ int __init musb_gadget_setup(struct musb *musb) musb->g.max_speed = USB_SPEED_HIGH; musb->g.speed = USB_SPEED_UNKNOWN; + MUSB_DEV_MODE(musb); + musb->xceiv->otg->default_a = 0; + musb->xceiv->state = OTG_STATE_B_IDLE; + /* this "gadget" abstracts/virtualizes the controller */ - dev_set_name(&musb->g.dev, "gadget"); - musb->g.dev.parent = musb->controller; - musb->g.dev.dma_mask = musb->controller->dma_mask; - musb->g.dev.release = musb_gadget_release; musb->g.name = musb_driver_name; - - if (is_otg_enabled(musb)) - musb->g.is_otg = 1; +#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE) + musb->g.is_otg = 1; +#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET) + musb->g.is_otg = 0; +#endif musb_g_init_endpoints(musb); musb->is_active = 0; musb_platform_try_idle(musb, 0); - status = device_register(&musb->g.dev); - if (status != 0) { - put_device(&musb->g.dev); - return status; - } status = usb_add_gadget_udc(musb->controller, &musb->g); if (status) goto err; @@ -1878,9 +1820,9 @@ err: void musb_gadget_cleanup(struct musb *musb) { + if (musb->port_mode == MUSB_PORT_MODE_HOST) + return; usb_del_gadget_udc(&musb->g); - if (musb->g.dev.parent) - device_unregister(&musb->g.dev); } /* @@ -1898,11 +1840,14 @@ static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver) { struct musb *musb = gadget_to_musb(g); + struct usb_otg *otg = musb->xceiv->otg; unsigned long flags; - int retval = -EINVAL; + int retval = 0; - if (driver->max_speed < USB_SPEED_HIGH) - goto err0; + if (driver->max_speed < USB_SPEED_HIGH) { + retval = -EINVAL; + goto err; + } pm_runtime_get_sync(musb->controller); @@ -1914,51 +1859,25 @@ static int musb_gadget_start(struct usb_gadget *g, spin_lock_irqsave(&musb->lock, flags); musb->is_active = 1; - otg_set_peripheral(musb->xceiv, &musb->g); + otg_set_peripheral(otg, &musb->g); musb->xceiv->state = OTG_STATE_B_IDLE; 
- - /* - * FIXME this ignores the softconnect flag. Drivers are - * allowed hold the peripheral inactive until for example - * userspace hooks up printer hardware or DSP codecs, so - * hosts only see fully functional devices. - */ - - if (!is_otg_enabled(musb)) - musb_start(musb); - spin_unlock_irqrestore(&musb->lock, flags); - if (is_otg_enabled(musb)) { - struct usb_hcd *hcd = musb_to_hcd(musb); - - dev_dbg(musb->controller, "OTG startup...\n"); - - /* REVISIT: funcall to other code, which also - * handles power budgeting ... this way also - * ensures HdrcStart is indirectly called. - */ - retval = usb_add_hcd(musb_to_hcd(musb), -1, 0); - if (retval < 0) { - dev_dbg(musb->controller, "add_hcd failed, %d\n", retval); - goto err2; - } + musb_start(musb); - if ((musb->xceiv->last_event == USB_EVENT_ID) - && musb->xceiv->set_vbus) - otg_set_vbus(musb->xceiv, 1); + /* REVISIT: funcall to other code, which also + * handles power budgeting ... this way also + * ensures HdrcStart is indirectly called. + */ + if (musb->xceiv->last_event == USB_EVENT_ID) + musb_platform_set_vbus(musb, 1); - hcd->self.uses_pio_for_control = 1; - } if (musb->xceiv->last_event == USB_EVENT_NONE) pm_runtime_put(musb->controller); return 0; -err2: - if (!is_otg_enabled(musb)) - musb_stop(musb); -err0: +err: return retval; } @@ -2028,24 +1947,21 @@ static int musb_gadget_stop(struct usb_gadget *g, musb->xceiv->state = OTG_STATE_UNDEFINED; stop_activity(musb, driver); - otg_set_peripheral(musb->xceiv, NULL); + otg_set_peripheral(musb->xceiv->otg, NULL); - dev_dbg(musb->controller, "unregistering driver %s\n", driver->function); + dev_dbg(musb->controller, "unregistering driver %s\n", + driver ? driver->function : "(removed)"); musb->is_active = 0; + musb->gadget_driver = NULL; musb_platform_try_idle(musb, 0); spin_unlock_irqrestore(&musb->lock, flags); - if (is_otg_enabled(musb)) { - usb_remove_hcd(musb_to_hcd(musb)); - /* FIXME we need to be able to register another - * gadget driver here and have everything work; - * that currently misbehaves. - */ - } - - if (!is_otg_enabled(musb)) - musb_stop(musb); + /* + * FIXME we need to be able to register another + * gadget driver here and have everything work; + * that currently misbehaves. + */ pm_runtime_put(musb->controller); @@ -2073,7 +1989,7 @@ void musb_g_resume(struct musb *musb) break; default: WARNING("unhandled RESUME transition (%s)\n", - otg_state_string(musb->xceiv->state)); + usb_otg_state_string(musb->xceiv->state)); } } @@ -2103,7 +2019,7 @@ void musb_g_suspend(struct musb *musb) * A_PERIPHERAL may need care too */ WARNING("unhandled SUSPEND transition (%s)\n", - otg_state_string(musb->xceiv->state)); + usb_otg_state_string(musb->xceiv->state)); } } @@ -2137,7 +2053,7 @@ void musb_g_disconnect(struct musb *musb) switch (musb->xceiv->state) { default: dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", - otg_state_string(musb->xceiv->state)); + usb_otg_state_string(musb->xceiv->state)); musb->xceiv->state = OTG_STATE_A_IDLE; MUSB_HST_MODE(musb); break; @@ -2166,10 +2082,9 @@ __acquires(musb->lock) u8 devctl = musb_readb(mbase, MUSB_DEVCTL); u8 power; - dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n", + dev_dbg(musb->controller, "<== %s driver '%s'\n", (devctl & MUSB_DEVCTL_BDEVICE) ? "B-Device" : "A-Device", - musb_readb(mbase, MUSB_FADDR), musb->gadget_driver ? 
musb->gadget_driver->driver.name : NULL @@ -2204,16 +2119,22 @@ __acquires(musb->lock) /* Normal reset, as B-Device; * or else after HNP, as A-Device */ - if (devctl & MUSB_DEVCTL_BDEVICE) { + if (!musb->g.is_otg) { + /* USB device controllers that are not OTG compatible + * may not have DEVCTL register in silicon. + * In that case, do not rely on devctl for setting + * peripheral mode. + */ + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + musb->g.is_a_peripheral = 0; + } else if (devctl & MUSB_DEVCTL_BDEVICE) { musb->xceiv->state = OTG_STATE_B_PERIPHERAL; musb->g.is_a_peripheral = 0; - } else if (is_otg_enabled(musb)) { + } else { musb->xceiv->state = OTG_STATE_A_PERIPHERAL; musb->g.is_a_peripheral = 1; - } else - WARN_ON(1); + } /* start with default limits on VBUS power draw */ - (void) musb_gadget_vbus_draw(&musb->g, - is_otg_enabled(musb) ? 8 : 100); + (void) musb_gadget_vbus_draw(&musb->g, 8); } |
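A note on the map_dma_buffer() change above: the patch stores the result of dma_map_single() in a local variable, checks it with dma_mapping_error(), and returns early (leaving the request unmapped so the driver keeps using PIO) before committing the handle to request->request.dma. A minimal sketch of that pattern follows; struct xfer_req, its fields, and xfer_req_map() are invented for illustration and are not part of the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Illustrative only: map a buffer for DMA and report failure so the
 * caller can fall back to PIO, mirroring the check this patch adds to
 * map_dma_buffer().
 */
struct xfer_req {
	void		*buf;
	size_t		len;
	dma_addr_t	dma;
	bool		to_device;	/* TX: memory -> device */
};

static int xfer_req_map(struct device *dev, struct xfer_req *req)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, req->buf, req->len,
			      req->to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/* The returned handle must be checked before it is ever used. */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* caller stays on PIO */

	req->dma = addr;
	return 0;
}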
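The TXCSR hunk also gates MUSB_TXCSR_AUTOSET on the bulk_split/hb_mult table quoted in the new comment. The decision reduces to a single predicate, sketched below; autoset_allowed() is an invented name, while hb_mult and can_bulk_split() are the driver's own.

#include <linux/types.h>

/*
 * AUTOSET decision per the patch's comment table:
 *
 *	bulk_split	hb_mult		AUTOSET
 *	    0		   0		yes (normal)
 *	    0		  >0		no  (high-bandwidth ISO)
 *	    1		   0		yes (HS bulk)
 *	    1		  >0		yes (FS bulk, FIFO split)
 */
static bool autoset_allowed(unsigned int hb_mult, bool bulk_split)
{
	return hb_mult == 0 || bulk_split;
}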
