Diffstat (limited to 'drivers/usb/host/ohci-q.c')
-rw-r--r--  drivers/usb/host/ohci-q.c  104
1 file changed, 80 insertions, 24 deletions
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 9c9f3b59186..d4253e31942 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/slab.h>
 
 static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
 {
@@ -40,25 +41,33 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
 __releases(ohci->lock)
 __acquires(ohci->lock)
 {
+	struct device *dev = ohci_to_hcd(ohci)->self.controller;
+	struct usb_host_endpoint *ep = urb->ep;
+	struct urb_priv *urb_priv;
+
 	// ASSERT (urb->hcpriv != 0);
 
+ restart:
 	urb_free_priv (ohci, urb->hcpriv);
+	urb->hcpriv = NULL;
 	if (likely(status == -EINPROGRESS))
 		status = 0;
 
 	switch (usb_pipetype (urb->pipe)) {
 	case PIPE_ISOCHRONOUS:
 		ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
+		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
+			if (quirk_amdiso(ohci))
+				usb_amd_quirk_pll_enable();
+			if (quirk_amdprefetch(ohci))
+				sb800_prefetch(dev, 0);
+		}
 		break;
 	case PIPE_INTERRUPT:
 		ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
 		break;
 	}
 
-#ifdef OHCI_VERBOSE_DEBUG
-	urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
-#endif
-
 	/* urb->complete() can reenter this HCD */
 	usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
 	spin_unlock (&ohci->lock);
@@ -71,6 +80,21 @@ __acquires(ohci->lock)
 		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
 		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
 	}
+
+	/*
+	 * An isochronous URB that is sumitted too late won't have any TDs
+	 * (marked by the fact that the td_cnt value is larger than the
+	 * actual number of TDs).  If the next URB on this endpoint is like
+	 * that, give it back now.
+	 */
+	if (!list_empty(&ep->urb_list)) {
+		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+		urb_priv = urb->hcpriv;
+		if (urb_priv->td_cnt > urb_priv->length) {
+			status = 0;
+			goto restart;
+		}
+	}
 }
 
 
@@ -119,7 +143,7 @@ static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
 {
 	unsigned	i;
 
-	ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
+	ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
 		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
 		ed, ed->branch, ed->load, ed->interval);
 
@@ -159,9 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 {
 	int	branch;
 
-	if (ohci_to_hcd(ohci)->state == HC_STATE_QUIESCING)
-		return -EAGAIN;
-
 	ed->state = ED_OPER;
 	ed->ed_prev = NULL;
 	ed->ed_next = NULL;
@@ -269,7 +290,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
 	}
 	ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
 
-	ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
+	ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
 		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
 		ed, ed->branch, ed->load, ed->interval);
 }
@@ -418,13 +439,13 @@ static struct ed *ed_get (
 		is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);
 
 		/* FIXME usbcore changes dev->devnum before SET_ADDRESS
-		 * suceeds ... otherwise we wouldn't need "pipe".
+		 * succeeds ... otherwise we wouldn't need "pipe".
 		 */
 		info = usb_pipedevice (pipe);
 		ed->type = usb_pipetype(pipe);
 
 		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
-		info |= le16_to_cpu(ep->desc.wMaxPacketSize) << 16;
+		info |= usb_endpoint_maxp(&ep->desc) << 16;
 		if (udev->speed == USB_SPEED_LOW)
 			info |= ED_LOWSPEED;
 		/* only control transfers store pids in tds */
@@ -440,7 +461,7 @@ static struct ed *ed_get (
 			ed->load = usb_calc_bus_time (
 				udev->speed, !is_out,
 				ed->type == PIPE_ISOCHRONOUS,
-				le16_to_cpu(ep->desc.wMaxPacketSize))
+				usb_endpoint_maxp(&ep->desc))
 				/ 1000;
 		}
 	}
@@ -540,7 +561,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
 		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
 		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
 						(data & 0x0FFF) | 0xE000);
-		td->ed->last_iso = info & 0xffff;
 	} else {
 		td->hwCBP = cpu_to_hc32 (ohci, data);
 	}
@@ -575,6 +595,7 @@ static void td_submit_urb (
 	struct urb	*urb
 ) {
 	struct urb_priv	*urb_priv = urb->hcpriv;
+	struct device *dev = ohci_to_hcd(ohci)->self.controller;
 	dma_addr_t	data;
 	int		data_len = urb->transfer_buffer_length;
 	int		cnt = 0;
@@ -592,7 +613,6 @@ static void td_submit_urb (
 		urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
 	}
 
-	urb_priv->td_cnt = 0;
 	list_add (&urb_priv->pending, &ohci->pending);
 
 	if (data_len)
@@ -668,7 +688,8 @@ static void td_submit_urb (
 	 * we could often reduce the number of TDs here.
 	 */
 	case PIPE_ISOCHRONOUS:
-		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
+		for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
+				cnt++) {
 			int	frame = urb->start_frame;
 
 			// FIXME scheduling should handle frame counter
@@ -680,6 +701,12 @@ static void td_submit_urb (
 				data + urb->iso_frame_desc [cnt].offset,
 				urb->iso_frame_desc [cnt].length, urb, cnt);
 		}
+		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
+			if (quirk_amdiso(ohci))
+				usb_amd_quirk_pll_disable();
+			if (quirk_amdprefetch(ohci))
+				sb800_prefetch(dev, 1);
+		}
 		periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
 			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
 		break;
@@ -734,7 +761,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
 		urb->iso_frame_desc [td->index].status = cc_to_error [cc];
 
 		if (cc != TD_CC_NOERROR)
-			ohci_vdbg (ohci,
+			ohci_dbg(ohci,
 				"urb %p iso td %p (%d) len %d cc %d\n",
 				urb, td, 1 + td->index, dlen, cc);
 
@@ -766,7 +793,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
 		}
 
 		if (cc != TD_CC_NOERROR && cc < 0x0E)
-			ohci_vdbg (ohci,
+			ohci_dbg(ohci,
 				"urb %p td %p (%d) cc %d, len=%d/%d\n",
 				urb, td, 1 + td->index, cc,
 				urb->actual_length,
@@ -902,7 +929,7 @@ rescan_all:
 		/* only take off EDs that the HC isn't using, accounting for
 		 * frame counter wraps and EDs with partially retired TDs
 		 */
-		if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
+		if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
 			if (tick_before (tick, ed->tick)) {
 skip_ed:
 				last = &ed->ed_next;
@@ -952,6 +979,7 @@ rescan_this:
 		struct urb	*urb;
 		urb_priv_t	*urb_priv;
 		__hc32		savebits;
+		u32		tdINFO;
 
 		td = list_entry (entry, struct td, td_list);
 		urb = td->urb;
@@ -966,12 +994,23 @@ rescan_this:
 			savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
 			*prev = td->hwNextTD | savebits;
 
+			/* If this was unlinked, the TD may not have been
+			 * retired ... so manually save the data toggle.
+			 * The controller ignores the value we save for
+			 * control and ISO endpoints.
+			 */
+			tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
+			if ((tdINFO & TD_T) == TD_T_DATA0)
+				ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
+			else if ((tdINFO & TD_T) == TD_T_DATA1)
+				ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);
+
 			/* HC may have partly processed this TD */
 			td_done (ohci, urb, td);
 			urb_priv->td_cnt++;
 
 			/* if URB is done, clean up */
-			if (urb_priv->td_cnt == urb_priv->length) {
+			if (urb_priv->td_cnt >= urb_priv->length) {
 				modified = completed = 1;
 				finish_urb(ohci, urb, 0);
 			}
@@ -990,7 +1029,7 @@ rescan_this:
 
 		/* but if there's work queued, reschedule */
 		if (!list_empty (&ed->td_list)) {
-			if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
+			if (ohci->rh_state == OHCI_RH_RUNNING)
 				ed_schedule (ohci, ed);
 		}
 
@@ -999,9 +1038,7 @@ rescan_this:
 	}
 
 	/* maybe reenable control and bulk lists */
-	if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
-			&& ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
-			&& !ohci->ed_rm_list) {
+	if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
 		u32	command = 0, control = 0;
 
 		if (ohci->ed_controltail) {
@@ -1063,7 +1100,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
 	urb_priv->td_cnt++;
 
 	/* If all this urb's TDs are done, call complete() */
-	if (urb_priv->td_cnt == urb_priv->length)
+	if (urb_priv->td_cnt >= urb_priv->length)
 		finish_urb(ohci, urb, status);
 
 	/* clean schedule: unlink EDs that are no longer busy */
@@ -1108,6 +1145,25 @@ dl_done_list (struct ohci_hcd *ohci)
 
 	while (td) {
 		struct td	*td_next = td->next_dl_td;
+		struct ed	*ed = td->ed;
+
+		/*
+		 * Some OHCI controllers (NVIDIA for sure, maybe others)
+		 * occasionally forget to add TDs to the done queue.  Since
+		 * TDs for a given endpoint are always processed in order,
+		 * if we find a TD on the donelist then all of its
+		 * predecessors must be finished as well.
+		 */
+		for (;;) {
+			struct td	*td2;
+
+			td2 = list_first_entry(&ed->td_list, struct td,
+					td_list);
+			if (td2 == td)
+				break;
+			takeback_td(ohci, td2);
+		}
+
 		takeback_td(ohci, td);
 		td = td_next;
 	}
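
The last hunk works around controllers that occasionally omit TDs from the done queue: because TDs on an endpoint complete in order, finding one TD on the donelist implies every earlier TD queued on that endpoint is finished too, so those predecessors are taken back first. Below is a minimal standalone sketch of that catch-up logic (ordinary user-space C, not kernel code); the sketch_td struct and the take_back()/take_back_through() helpers are made-up stand-ins for the driver's td and takeback_td().

/*
 * Standalone model of the donelist workaround: retire every TD queued
 * ahead of the one the controller actually reported, then retire that
 * TD itself. Names here are illustrative, not kernel APIs.
 */
#include <stdio.h>

struct sketch_td {
	int			id;
	struct sketch_td	*next;	/* next TD still queued on this endpoint */
};

/* Hypothetical stand-in for takeback_td(): just report the retirement. */
static void take_back(struct sketch_td *td)
{
	printf("retiring TD %d\n", td->id);
}

/*
 * Retire 'done' and all of its predecessors, mirroring the for (;;)
 * loop added to dl_done_list(); returns the new head of the queue.
 */
static struct sketch_td *take_back_through(struct sketch_td *head,
					   struct sketch_td *done)
{
	while (head && head != done) {	/* predecessors the HC "forgot" */
		struct sketch_td *next = head->next;

		take_back(head);
		head = next;
	}
	if (head) {			/* the TD actually on the donelist */
		struct sketch_td *next = head->next;

		take_back(head);
		head = next;
	}
	return head;
}

int main(void)
{
	struct sketch_td td3 = { 3, NULL };
	struct sketch_td td2 = { 2, &td3 };
	struct sketch_td td1 = { 1, &td2 };

	/* The controller reported only TD 2; TD 1 must be finished too. */
	take_back_through(&td1, &td2);
	return 0;
}

Compiled and run, this retires TD 1 and then TD 2 even though only TD 2 was reported, which is the ordering argument the in-kernel comment relies on.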
