Diffstat (limited to 'drivers/usb/musb/musb_gadget.c')
-rw-r--r--  drivers/usb/musb/musb_gadget.c  |  71
1 file changed, 44 insertions(+), 27 deletions(-)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index fe8d14cac43..ae4a20acef6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -634,6 +634,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
u16 len;
u16 csr = musb_readw(epio, MUSB_RXCSR);
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+ u8 use_mode_1;
if (hw_ep->is_shared_fifo)
musb_ep = &hw_ep->ep_in;
@@ -683,6 +684,18 @@ static void rxstate(struct musb *musb, struct musb_request *req)
if (csr & MUSB_RXCSR_RXPKTRDY) {
len = musb_readw(epio, MUSB_RXCOUNT);
+
+ /*
+ * Enable Mode 1 on RX transfers only when short_not_ok flag
+ * is set. Currently short_not_ok flag is set only from
+ * file_storage and f_mass_storage drivers
+ */
+
+ if (request->short_not_ok && len == musb_ep->packet_sz)
+ use_mode_1 = 1;
+ else
+ use_mode_1 = 0;
+
if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
if (is_buffer_mapped(req)) {
@@ -714,37 +727,41 @@ static void rxstate(struct musb *musb, struct musb_request *req)
* then becomes usable as a runtime "use mode 1" hint...
*/
- csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
- csr |= MUSB_RXCSR_AUTOCLEAR;
- /* csr |= MUSB_RXCSR_DMAMODE; */
-
- /* this special sequence (enabling and then
- * disabling MUSB_RXCSR_DMAMODE) is required
- * to get DMAReq to activate
- */
- musb_writew(epio, MUSB_RXCSR,
- csr | MUSB_RXCSR_DMAMODE);
-#else
- if (!musb_ep->hb_mult &&
- musb_ep->hw_ep->rx_double_buffered)
+ /* Experimental: Mode1 works with mass storage use cases */
+ if (use_mode_1) {
csr |= MUSB_RXCSR_AUTOCLEAR;
-#endif
- musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ /*
+ * this special sequence (enabling and then
+ * disabling MUSB_RXCSR_DMAMODE) is required
+ * to get DMAReq to activate
+ */
+ musb_writew(epio, MUSB_RXCSR,
+ csr | MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ } else {
+ if (!musb_ep->hb_mult &&
+ musb_ep->hw_ep->rx_double_buffered)
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
if (request->actual < request->length) {
int transfer_size = 0;
-#ifdef USE_MODE1
- transfer_size = min(request->length - request->actual,
- channel->max_len);
-#else
- transfer_size = min(request->length - request->actual,
- (unsigned)len);
-#endif
- if (transfer_size <= musb_ep->packet_sz)
- musb_ep->dma->desired_mode = 0;
- else
+ if (use_mode_1) {
+ transfer_size = min(request->length - request->actual,
+ channel->max_len);
musb_ep->dma->desired_mode = 1;
+ } else {
+ transfer_size = min(request->length - request->actual,
+ (unsigned)len);
+ musb_ep->dma->desired_mode = 0;
+ }
use_dma = c->channel_program(
channel,
@@ -1020,7 +1037,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
/* REVISIT this rules out high bandwidth periodic transfers */
- tmp = le16_to_cpu(desc->wMaxPacketSize);
+ tmp = usb_endpoint_maxp(desc);
if (tmp & ~0x07ff) {
int ok;
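
Note on the rxstate() hunks above: the patch only turns on RX DMA mode 1 when the gadget driver has set short_not_ok on the request, i.e. when the driver guarantees the transfer will not end in a short packet (per the comment, currently only file_storage and f_mass_storage do this). A minimal sketch of how a function driver might queue such an OUT request is below; the function names, buffer handling, and completion callback are illustrative, not taken from this patch.

/*
 * Sketch only: queueing a bulk-OUT request with short_not_ok set, so that
 * rxstate() above can pick DMA mode 1. Names here are hypothetical.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb/gadget.h>

static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->actual bytes were received; req->status reports errors */
}

static int queue_bulk_out(struct usb_ep *ep, void *buf, unsigned len)
{
	struct usb_request *req;
	int ret;

	req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;	/* expected size, known in advance */
	req->short_not_ok = 1;	/* a short packet is an error: allows DMA mode 1 */
	req->complete = bulk_out_complete;

	ret = usb_ep_queue(ep, req, GFP_KERNEL);
	if (ret)
		usb_ep_free_request(ep, req);
	return ret;
}

Mass-storage gadgets can set this flag because the host announces the exact transfer length up front, so request->length matches what will actually arrive, which is exactly the condition mode 1 needs.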
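
On the final hunk: usb_endpoint_maxp() replaces the open-coded le16_to_cpu(desc->wMaxPacketSize) and, as used here, returns the whole converted field, so the existing ~0x07ff check still sees the high-bandwidth bits (bits 12:11, additional transactions per microframe). A small decode sketch, with hypothetical variable and function names:

/*
 * Sketch: decoding wMaxPacketSize the way musb_gadget_enable() checks it
 * after this patch. Names are illustrative only.
 */
#include <linux/kernel.h>
#include <linux/usb/ch9.h>

static void decode_maxp(const struct usb_endpoint_descriptor *desc)
{
	u16 maxp = usb_endpoint_maxp(desc);	/* le16_to_cpu(desc->wMaxPacketSize) */
	u16 size = maxp & 0x07ff;		/* bits 10:0 - max packet size in bytes */
	u16 hb_mult = (maxp >> 11) & 0x3;	/* bits 12:11 - extra transactions per microframe */

	/* musb_gadget_enable() treats any bit above 10 as high-bandwidth */
	if (maxp & ~0x07ff)
		pr_info("high-bandwidth endpoint: %u packets of %u bytes\n",
			hb_mult + 1, size);
}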