Diffstat (limited to 'drivers/usb/host/ohci-q.c')
-rw-r--r--	drivers/usb/host/ohci-q.c	82
1 file changed, 58 insertions(+), 24 deletions(-)
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 83094d067e0..d4253e31942 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -41,9 +41,15 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
+ struct device *dev = ohci_to_hcd(ohci)->self.controller;
+ struct usb_host_endpoint *ep = urb->ep;
+ struct urb_priv *urb_priv;
+
// ASSERT (urb->hcpriv != 0);
+ restart:
urb_free_priv (ohci, urb->hcpriv);
+ urb->hcpriv = NULL;
if (likely(status == -EINPROGRESS))
status = 0;
@@ -52,9 +58,9 @@ __acquires(ohci->lock)
ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
if (quirk_amdiso(ohci))
- quirk_amd_pll(1);
+ usb_amd_quirk_pll_enable();
if (quirk_amdprefetch(ohci))
- sb800_prefetch(ohci, 0);
+ sb800_prefetch(dev, 0);
}
break;
case PIPE_INTERRUPT:
@@ -62,10 +68,6 @@ __acquires(ohci->lock)
break;
}
-#ifdef OHCI_VERBOSE_DEBUG
- urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
-#endif
-
/* urb->complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
spin_unlock (&ohci->lock);
@@ -78,6 +80,21 @@ __acquires(ohci->lock)
ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
}
+
+ /*
+ * An isochronous URB that is submitted too late won't have any TDs
+ * (marked by the fact that the td_cnt value is larger than the
+ * actual number of TDs). If the next URB on this endpoint is like
+ * that, give it back now.
+ */
+ if (!list_empty(&ep->urb_list)) {
+ urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+ urb_priv = urb->hcpriv;
+ if (urb_priv->td_cnt > urb_priv->length) {
+ status = 0;
+ goto restart;
+ }
+ }
}
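
The hunk above also gives back, in the same pass, any following URB on the endpoint that was submitted too late to get TDs (marked by td_cnt exceeding length), instead of leaving it stranded on the queue. A minimal userspace sketch of that restart pattern follows; fake_urb, give_back() and finish_and_drain() are illustrative stand-ins, not the kernel's types or helpers.

#include <stdio.h>

/* Illustrative stand-ins only; not the kernel's struct urb / urb_priv. */
struct fake_urb {
	int id;
	unsigned td_cnt;	/* TDs already consumed (missed frames count here) */
	unsigned length;	/* TDs this URB would need */
	struct fake_urb *next;	/* next URB queued on the same endpoint */
};

static void give_back(struct fake_urb *urb)
{
	printf("gave back urb %d\n", urb->id);
}

/*
 * After finishing one URB, keep giving back any following URBs that were
 * submitted too late to get TDs (td_cnt > length), so they do not sit on
 * the endpoint queue indefinitely.
 */
static void finish_and_drain(struct fake_urb *urb)
{
restart:
	give_back(urb);
	if (urb->next && urb->next->td_cnt > urb->next->length) {
		urb = urb->next;
		goto restart;
	}
}

int main(void)
{
	struct fake_urb late = { .id = 2, .td_cnt = 5, .length = 4 };
	struct fake_urb done = { .id = 1, .td_cnt = 4, .length = 4, .next = &late };

	finish_and_drain(&done);	/* urb 2 is returned immediately as well */
	return 0;
}
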
@@ -126,7 +143,7 @@ static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
unsigned i;
- ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
+ ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
@@ -273,7 +290,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
}
ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
- ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
+ ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
}
@@ -428,7 +445,7 @@ static struct ed *ed_get (
ed->type = usb_pipetype(pipe);
info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
- info |= le16_to_cpu(ep->desc.wMaxPacketSize) << 16;
+ info |= usb_endpoint_maxp(&ep->desc) << 16;
if (udev->speed == USB_SPEED_LOW)
info |= ED_LOWSPEED;
/* only control transfers store pids in tds */
@@ -444,7 +461,7 @@ static struct ed *ed_get (
ed->load = usb_calc_bus_time (
udev->speed, !is_out,
ed->type == PIPE_ISOCHRONOUS,
- le16_to_cpu(ep->desc.wMaxPacketSize))
+ usb_endpoint_maxp(&ep->desc))
/ 1000;
}
}
@@ -544,7 +561,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
(data & 0x0FFF) | 0xE000);
- td->ed->last_iso = info & 0xffff;
} else {
td->hwCBP = cpu_to_hc32 (ohci, data);
}
@@ -579,6 +595,7 @@ static void td_submit_urb (
struct urb *urb
) {
struct urb_priv *urb_priv = urb->hcpriv;
+ struct device *dev = ohci_to_hcd(ohci)->self.controller;
dma_addr_t data;
int data_len = urb->transfer_buffer_length;
int cnt = 0;
@@ -596,7 +613,6 @@ static void td_submit_urb (
urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
}
- urb_priv->td_cnt = 0;
list_add (&urb_priv->pending, &ohci->pending);
if (data_len)
@@ -672,7 +688,8 @@ static void td_submit_urb (
* we could often reduce the number of TDs here.
*/
case PIPE_ISOCHRONOUS:
- for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
+ for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
+ cnt++) {
int frame = urb->start_frame;
// FIXME scheduling should handle frame counter
@@ -686,9 +703,9 @@ static void td_submit_urb (
}
if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
if (quirk_amdiso(ohci))
- quirk_amd_pll(0);
+ usb_amd_quirk_pll_disable();
if (quirk_amdprefetch(ohci))
- sb800_prefetch(ohci, 1);
+ sb800_prefetch(dev, 1);
}
periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
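
This hunk and the finish_urb change above gate the AMD PLL/prefetch workarounds on the shared isochronous-request counter: apply them when the first iso stream starts, undo them when the last one completes. A minimal sketch of that counting pattern, assuming hypothetical pll_quirk(), iso_start() and iso_finish() helpers (the real hooks in the patch are usb_amd_quirk_pll_enable()/usb_amd_quirk_pll_disable() and sb800_prefetch()):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stub; stands in for the PCI-quirk hooks used in the patch. */
static void pll_quirk(bool restore)
{
	printf("PLL quirk %s\n", restore ? "restored" : "applied");
}

static int isoc_reqs;	/* like hcd->self.bandwidth_isoc_reqs */

/* Called when an isochronous URB is queued. */
static void iso_start(void)
{
	if (isoc_reqs == 0)
		pll_quirk(false);	/* first iso stream: apply workaround */
	isoc_reqs++;
}

/* Called when an isochronous URB completes. */
static void iso_finish(void)
{
	if (--isoc_reqs == 0)
		pll_quirk(true);	/* last iso stream gone: undo workaround */
}

int main(void)
{
	iso_start();	/* quirk applied */
	iso_start();	/* no change */
	iso_finish();
	iso_finish();	/* quirk restored */
	return 0;
}
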
@@ -744,7 +761,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
urb->iso_frame_desc [td->index].status = cc_to_error [cc];
if (cc != TD_CC_NOERROR)
- ohci_vdbg (ohci,
+ ohci_dbg(ohci,
"urb %p iso td %p (%d) len %d cc %d\n",
urb, td, 1 + td->index, dlen, cc);
@@ -776,7 +793,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
}
if (cc != TD_CC_NOERROR && cc < 0x0E)
- ohci_vdbg (ohci,
+ ohci_dbg(ohci,
"urb %p td %p (%d) cc %d, len=%d/%d\n",
urb, td, 1 + td->index, cc,
urb->actual_length,
@@ -912,7 +929,7 @@ rescan_all:
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
- if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
+ if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
if (tick_before (tick, ed->tick)) {
skip_ed:
last = &ed->ed_next;
@@ -993,7 +1010,7 @@ rescan_this:
urb_priv->td_cnt++;
/* if URB is done, clean up */
- if (urb_priv->td_cnt == urb_priv->length) {
+ if (urb_priv->td_cnt >= urb_priv->length) {
modified = completed = 1;
finish_urb(ohci, urb, 0);
}
@@ -1012,7 +1029,7 @@ rescan_this:
/* but if there's work queued, reschedule */
if (!list_empty (&ed->td_list)) {
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
+ if (ohci->rh_state == OHCI_RH_RUNNING)
ed_schedule (ohci, ed);
}
@@ -1021,9 +1038,7 @@ rescan_this:
}
/* maybe reenable control and bulk lists */
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
- && ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
- && !ohci->ed_rm_list) {
+ if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
u32 command = 0, control = 0;
if (ohci->ed_controltail) {
@@ -1085,7 +1100,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
urb_priv->td_cnt++;
/* If all this urb's TDs are done, call complete() */
- if (urb_priv->td_cnt == urb_priv->length)
+ if (urb_priv->td_cnt >= urb_priv->length)
finish_urb(ohci, urb, status);
/* clean schedule: unlink EDs that are no longer busy */
@@ -1130,6 +1145,25 @@ dl_done_list (struct ohci_hcd *ohci)
while (td) {
struct td *td_next = td->next_dl_td;
+ struct ed *ed = td->ed;
+
+ /*
+ * Some OHCI controllers (NVIDIA for sure, maybe others)
+ * occasionally forget to add TDs to the done queue. Since
+ * TDs for a given endpoint are always processed in order,
+ * if we find a TD on the donelist then all of its
+ * predecessors must be finished as well.
+ */
+ for (;;) {
+ struct td *td2;
+
+ td2 = list_first_entry(&ed->td_list, struct td,
+ td_list);
+ if (td2 == td)
+ break;
+ takeback_td(ohci, td2);
+ }
+
takeback_td(ohci, td);
td = td_next;
}
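
The workaround added in this last hunk relies on in-order completion: if a TD shows up on the done queue, every TD queued before it on the same endpoint must also be finished, so they can be taken back first even though the controller never reported them. A minimal userspace sketch of that ordering argument; fake_td and takeback_with_predecessors() are illustrative only, not the kernel's structures.

#include <stdio.h>

/* Illustrative stand-in for a queued transfer descriptor. */
struct fake_td {
	int index;
	struct fake_td *next;	/* order the TDs were queued on the endpoint */
};

static void takeback(struct fake_td *td)
{
	printf("retired td %d\n", td->index);
}

/*
 * Given the head of the endpoint's pending-TD list and one TD that the
 * controller reported on the done queue, retire every earlier TD first:
 * TDs for an endpoint complete in order, so anything queued before the
 * reported TD must already be finished even if it never reached the
 * done queue.
 */
static void takeback_with_predecessors(struct fake_td *ep_head, struct fake_td *done)
{
	while (ep_head && ep_head != done) {
		takeback(ep_head);
		ep_head = ep_head->next;
	}
	takeback(done);
}

int main(void)
{
	struct fake_td td3 = { .index = 3 };
	struct fake_td td2 = { .index = 2, .next = &td3 };
	struct fake_td td1 = { .index = 1, .next = &td2 };

	/* Controller reported only td3; td1 and td2 get retired as well. */
	takeback_with_predecessors(&td1, &td3);
	return 0;
}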