Diffstat (limited to 'drivers/usb/host/uhci-q.c')
 drivers/usb/host/uhci-q.c | 1543 ++++++++++++++++++++++++++++---------------
 1 file changed, 964 insertions(+), 579 deletions(-)
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index a06d84c19e1..da6f56d996c 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -13,10 +13,9 @@
  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
  * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
- * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
  */
 
-static void uhci_free_pending_tds(struct uhci_hcd *uhci);
 
 /*
  * Technically, updating td->status here is a race, but it's not really a
@@ -30,14 +29,80 @@ static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
 {
 	if (uhci->is_stopped)
 		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
-	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
+	uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
 }
 
 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
 {
-	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
+	uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
 }
 
+
+/*
+ * Full-Speed Bandwidth Reclamation (FSBR).
+ * We turn on FSBR whenever a queue that wants it is advancing,
+ * and leave it on for a short time thereafter.
+ */
+static void uhci_fsbr_on(struct uhci_hcd *uhci)
+{
+	struct uhci_qh *lqh;
+
+	/* The terminating skeleton QH always points back to the first
+	 * FSBR QH.  Make the last async QH point to the terminating
+	 * skeleton QH. */
+	uhci->fsbr_is_on = 1;
+	lqh = list_entry(uhci->skel_async_qh->node.prev,
+			struct uhci_qh, node);
+	lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
+}
+
+static void uhci_fsbr_off(struct uhci_hcd *uhci)
+{
+	struct uhci_qh *lqh;
+
+	/* Remove the link from the last async QH to the terminating
+	 * skeleton QH.
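/*
 * [Editor's illustration -- not part of the patch.]  A minimal,
 * self-contained sketch of the FSBR loop topology described above; the
 * types and helpers are invented for illustration and are not the
 * driver's real structures.  Turning FSBR on closes a cycle (last
 * async QH -> terminating skeleton QH -> first FSBR QH), so the
 * controller revisits the full-speed queues within the same frame
 * instead of idling until the next one.
 */
#include <stdio.h>

struct fake_qh {
	const char *name;
	struct fake_qh *hw_link;	/* stands in for the __hc32 link */
};

static void fake_fsbr_on(struct fake_qh *last_async, struct fake_qh *term)
{
	last_async->hw_link = term;	/* close the reclamation loop */
}

static void fake_fsbr_off(struct fake_qh *last_async)
{
	last_async->hw_link = NULL;	/* NULL models UHCI_PTR_TERM */
}

int main(void)
{
	struct fake_qh term = { "skel_term_qh", NULL };
	struct fake_qh lqh = { "last async QH", NULL };

	fake_fsbr_on(&lqh, &term);
	printf("%s -> %s\n", lqh.name, lqh.hw_link->name);
	fake_fsbr_off(&lqh);
	printf("%s -> %s\n", lqh.name,
			lqh.hw_link ? lqh.hw_link->name : "TERM");
	return 0;
}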
*/ + uhci->fsbr_is_on = 0; + lqh = list_entry(uhci->skel_async_qh->node.prev, + struct uhci_qh, node); + lqh->link = UHCI_PTR_TERM(uhci); +} + +static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb) +{ + struct urb_priv *urbp = urb->hcpriv; + + if (!(urb->transfer_flags & URB_NO_FSBR)) + urbp->fsbr = 1; +} + +static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp) +{ + if (urbp->fsbr) { + uhci->fsbr_is_wanted = 1; + if (!uhci->fsbr_is_on) + uhci_fsbr_on(uhci); + else if (uhci->fsbr_expiring) { + uhci->fsbr_expiring = 0; + del_timer(&uhci->fsbr_timer); + } + } +} + +static void uhci_fsbr_timeout(unsigned long _uhci) +{ + struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci; + unsigned long flags; + + spin_lock_irqsave(&uhci->lock, flags); + if (uhci->fsbr_expiring) { + uhci->fsbr_expiring = 0; + uhci_fsbr_off(uhci); + } + spin_unlock_irqrestore(&uhci->lock, flags); +} + + static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci) { dma_addr_t dma_handle; @@ -51,7 +116,6 @@ static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci) td->frame = -1; INIT_LIST_HEAD(&td->list); - INIT_LIST_HEAD(&td->remove_list); INIT_LIST_HEAD(&td->fl_list); return td; @@ -60,21 +124,29 @@ static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci) static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td) { if (!list_empty(&td->list)) - dev_warn(uhci_dev(uhci), "td %p still in list!\n", td); - if (!list_empty(&td->remove_list)) - dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td); + dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td); if (!list_empty(&td->fl_list)) - dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td); + dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td); dma_pool_free(uhci->td_pool, td, td->dma_handle); } -static inline void uhci_fill_td(struct uhci_td *td, u32 status, - u32 token, u32 buffer) +static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td, + u32 status, u32 token, u32 buffer) { - td->status = cpu_to_le32(status); - td->token = cpu_to_le32(token); - td->buffer = cpu_to_le32(buffer); + td->status = cpu_to_hc32(uhci, status); + td->token = cpu_to_hc32(uhci, token); + td->buffer = cpu_to_hc32(uhci, buffer); +} + +static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp) +{ + list_add_tail(&td->list, &urbp->td_list); +} + +static void uhci_remove_td_from_urbp(struct uhci_td *td) +{ + list_del_init(&td->list); } /* @@ -98,11 +170,11 @@ static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci, td->link = ltd->link; wmb(); - ltd->link = cpu_to_le32(td->dma_handle); + ltd->link = LINK_TO_TD(uhci, td); } else { td->link = uhci->frame[framenum]; wmb(); - uhci->frame[framenum] = cpu_to_le32(td->dma_handle); + uhci->frame[framenum] = LINK_TO_TD(uhci, td); uhci->frame_cpu[framenum] = td; } } @@ -123,8 +195,10 @@ static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci, } else { struct uhci_td *ntd; - ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list); - uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle); + ntd = list_entry(td->fl_list.next, + struct uhci_td, + fl_list); + uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd); uhci->frame_cpu[td->frame] = ntd; } } else { @@ -138,6 +212,24 @@ static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci, td->frame = -1; } +static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci, + unsigned int framenum) +{ + struct uhci_td *ftd, *ltd; + + framenum &= (UHCI_NUMFRAMES - 1); + + ftd = 
uhci->frame_cpu[framenum]; + if (ftd) { + ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list); + uhci->frame[framenum] = ltd->link; + uhci->frame_cpu[framenum] = NULL; + + while (!list_empty(&ftd->fl_list)) + list_del_init(ftd->fl_list.prev); + } +} + /* * Remove all the TDs for an Isochronous URB from the frame list */ @@ -148,7 +240,6 @@ static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb) list_for_each_entry(td, &urbp->td_list, list) uhci_remove_td_from_frame_list(uhci, td); - wmb(); } static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, @@ -161,28 +252,40 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, if (!qh) return NULL; + memset(qh, 0, sizeof(*qh)); qh->dma_handle = dma_handle; - qh->element = UHCI_PTR_TERM; - qh->link = UHCI_PTR_TERM; + qh->element = UHCI_PTR_TERM(uhci); + qh->link = UHCI_PTR_TERM(uhci); INIT_LIST_HEAD(&qh->queue); INIT_LIST_HEAD(&qh->node); if (udev) { /* Normal QH */ - qh->dummy_td = uhci_alloc_td(uhci); - if (!qh->dummy_td) { - dma_pool_free(uhci->qh_pool, qh, dma_handle); - return NULL; + qh->type = usb_endpoint_type(&hep->desc); + if (qh->type != USB_ENDPOINT_XFER_ISOC) { + qh->dummy_td = uhci_alloc_td(uhci); + if (!qh->dummy_td) { + dma_pool_free(uhci->qh_pool, qh, dma_handle); + return NULL; + } } qh->state = QH_STATE_IDLE; qh->hep = hep; qh->udev = udev; hep->hcpriv = qh; + if (qh->type == USB_ENDPOINT_XFER_INT || + qh->type == USB_ENDPOINT_XFER_ISOC) + qh->load = usb_calc_bus_time(udev->speed, + usb_endpoint_dir_in(&hep->desc), + qh->type == USB_ENDPOINT_XFER_ISOC, + usb_endpoint_maxp(&hep->desc)) + / 1000 + 1; + } else { /* Skeleton QH */ qh->state = QH_STATE_ACTIVE; - qh->udev = NULL; + qh->type = -1; } return qh; } @@ -191,53 +294,84 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) { WARN_ON(qh->state != QH_STATE_IDLE && qh->udev); if (!list_empty(&qh->queue)) - dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh); + dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh); list_del(&qh->node); if (qh->udev) { qh->hep->hcpriv = NULL; - uhci_free_td(uhci, qh->dummy_td); + if (qh->dummy_td) + uhci_free_td(uhci, qh->dummy_td); } dma_pool_free(uhci->qh_pool, qh, qh->dma_handle); } /* - * When the currently executing URB is dequeued, save its current toggle value + * When a queue is stopped and a dequeued URB is given back, adjust + * the previous TD link (if the URB isn't first on the queue) or + * save its toggle value (if it is first and is currently executing). + * + * Returns 0 if the URB should not yet be given back, 1 otherwise. */ -static void uhci_save_toggle(struct uhci_qh *qh, struct urb *urb) +static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh, + struct urb *urb) { - struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; + struct urb_priv *urbp = urb->hcpriv; struct uhci_td *td; + int ret = 1; + + /* Isochronous pipes don't use toggles and their TD link pointers + * get adjusted during uhci_urb_dequeue(). But since their queues + * cannot truly be stopped, we have to watch out for dequeues + * occurring after the nominal unlink frame. */ + if (qh->type == USB_ENDPOINT_XFER_ISOC) { + ret = (uhci->frame_number + uhci->is_stopped != + qh->unlink_frame); + goto done; + } + + /* If the URB isn't first on its queue, adjust the link pointer + * of the last TD in the previous URB. The toggle doesn't need + * to be saved since this URB can't be executing yet. 
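/*
 * [Editor's illustration -- not part of the patch.]  The pointer splice
 * uhci_cleanup_queue() performs when the dequeued URB is not first on
 * the queue, reduced to its essence (hypothetical sketch_td type): the
 * previous URB's final TD is pointed at whatever followed the removed
 * URB's final TD, so the hardware never walks the removed TDs again.
 */
struct sketch_td {
	unsigned int link;	/* bus address of the next TD */
};

static void splice_out_urb(struct sketch_td *prev_last,
			   const struct sketch_td *removed_last)
{
	prev_last->link = removed_last->link;
}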
*/ + if (qh->queue.next != &urbp->node) { + struct urb_priv *purbp; + struct uhci_td *ptd; + + purbp = list_entry(urbp->node.prev, struct urb_priv, node); + WARN_ON(list_empty(&purbp->td_list)); + ptd = list_entry(purbp->td_list.prev, struct uhci_td, + list); + td = list_entry(urbp->td_list.prev, struct uhci_td, + list); + ptd->link = td->link; + goto done; + } /* If the QH element pointer is UHCI_PTR_TERM then then currently * executing URB has already been unlinked, so this one isn't it. */ - if (qh_element(qh) == UHCI_PTR_TERM || - qh->queue.next != &urbp->node) - return; - qh->element = UHCI_PTR_TERM; + if (qh_element(qh) == UHCI_PTR_TERM(uhci)) + goto done; + qh->element = UHCI_PTR_TERM(uhci); - /* Only bulk and interrupt pipes have to worry about toggles */ - if (!(usb_pipetype(urb->pipe) == PIPE_BULK || - usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) - return; + /* Control pipes don't have to worry about toggles */ + if (qh->type == USB_ENDPOINT_XFER_CONTROL) + goto done; - /* Find the first active TD; that's the device's toggle state */ - list_for_each_entry(td, &urbp->td_list, list) { - if (td_status(td) & TD_CTRL_ACTIVE) { - qh->needs_fixup = 1; - qh->initial_toggle = uhci_toggle(td_token(td)); - return; - } - } + /* Save the next toggle value */ + WARN_ON(list_empty(&urbp->td_list)); + td = list_entry(urbp->td_list.next, struct uhci_td, list); + qh->needs_fixup = 1; + qh->initial_toggle = uhci_toggle(td_token(uhci, td)); - WARN_ON(1); +done: + return ret; } /* * Fix up the data toggles for URBs in a queue, when one of them * terminates early (short transfer, error, or dequeued). */ -static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) +static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh, + int skip_first) { struct urb_priv *urbp = NULL; struct uhci_td *td; @@ -251,7 +385,7 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) /* When starting with the first URB, if the QH element pointer is * still valid then we know the URB's toggles are okay. */ - else if (qh_element(qh) != UHCI_PTR_TERM) + else if (qh_element(qh) != UHCI_PTR_TERM(uhci)) toggle = 2; /* Fix up the toggle for the URBs in the queue. 
Normally this @@ -263,15 +397,15 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) /* If the first TD has the right toggle value, we don't * need to change any toggles in this URB */ td = list_entry(urbp->td_list.next, struct uhci_td, list); - if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) { - td = list_entry(urbp->td_list.next, struct uhci_td, + if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) { + td = list_entry(urbp->td_list.prev, struct uhci_td, list); - toggle = uhci_toggle(td_token(td)) ^ 1; + toggle = uhci_toggle(td_token(uhci, td)) ^ 1; /* Otherwise all the toggles in the URB have to be switched */ } else { list_for_each_entry(td, &urbp->td_list, list) { - td->token ^= __constant_cpu_to_le32( + td->token ^= cpu_to_hc32(uhci, TD_TOKEN_TOGGLE); toggle ^= 1; } @@ -286,65 +420,155 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) } /* - * Put a QH on the schedule in both hardware and software + * Link an Isochronous QH into its skeleton's list */ -static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) +static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh) +{ + list_add_tail(&qh->node, &uhci->skel_iso_qh->node); + + /* Isochronous QHs aren't linked by the hardware */ +} + +/* + * Link a high-period interrupt QH into the schedule at the end of its + * skeleton's list + */ +static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) +{ + struct uhci_qh *pqh; + + list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node); + + pqh = list_entry(qh->node.prev, struct uhci_qh, node); + qh->link = pqh->link; + wmb(); + pqh->link = LINK_TO_QH(uhci, qh); +} + +/* + * Link a period-1 interrupt or async QH into the schedule at the + * correct spot in the async skeleton's list, and update the FSBR link + */ +static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct uhci_qh *pqh; + __hc32 link_to_new_qh; + + /* Find the predecessor QH for our new one and insert it in the list. + * The list of QHs is expected to be short, so linear search won't + * take too long. */ + list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) { + if (pqh->skel <= qh->skel) + break; + } + list_add(&qh->node, &pqh->node); + /* Link it into the schedule */ + qh->link = pqh->link; + wmb(); + link_to_new_qh = LINK_TO_QH(uhci, qh); + pqh->link = link_to_new_qh; + + /* If this is now the first FSBR QH, link the terminating skeleton + * QH to it. */ + if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR) + uhci->skel_term_qh->link = link_to_new_qh; +} + +/* + * Put a QH on the schedule in both hardware and software + */ +static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) +{ WARN_ON(list_empty(&qh->queue)); /* Set the element pointer if it isn't set already. * This isn't needed for Isochronous queues, but it doesn't hurt. 
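/*
 * [Editor's illustration -- not part of the patch.]  A userspace model
 * of the toggle repair done by uhci_fixup_toggles(): DATA toggles must
 * alternate 0,1,0,1 across an endpoint.  If the first TD of an URB
 * already carries the expected toggle, the URB is left alone;
 * otherwise every TD in it is flipped.  Either way the expected toggle
 * for the next URB is the last TD's toggle XOR 1.
 */
#include <stddef.h>

static unsigned fix_toggles(unsigned *td_toggle, size_t n, unsigned expected)
{
	size_t i;

	if (n == 0)
		return expected;
	if (td_toggle[0] != expected)
		for (i = 0; i < n; ++i)
			td_toggle[i] ^= 1;	/* flip the whole URB */
	return td_toggle[n - 1] ^ 1;		/* toggle for the next URB */
}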
*/ - if (qh_element(qh) == UHCI_PTR_TERM) { + if (qh_element(qh) == UHCI_PTR_TERM(uhci)) { struct urb_priv *urbp = list_entry(qh->queue.next, struct urb_priv, node); struct uhci_td *td = list_entry(urbp->td_list.next, struct uhci_td, list); - qh->element = cpu_to_le32(td->dma_handle); + qh->element = LINK_TO_TD(uhci, td); } + /* Treat the queue as if it has just advanced */ + qh->wait_expired = 0; + qh->advance_jiffies = jiffies; + if (qh->state == QH_STATE_ACTIVE) return; qh->state = QH_STATE_ACTIVE; - /* Move the QH from its old list to the end of the appropriate + /* Move the QH from its old list to the correct spot in the appropriate * skeleton's list */ if (qh == uhci->next_qh) uhci->next_qh = list_entry(qh->node.next, struct uhci_qh, node); - list_move_tail(&qh->node, &qh->skel->node); + list_del(&qh->node); + + if (qh->skel == SKEL_ISO) + link_iso(uhci, qh); + else if (qh->skel < SKEL_ASYNC) + link_interrupt(uhci, qh); + else + link_async(uhci, qh); +} + +/* + * Unlink a high-period interrupt QH from the schedule + */ +static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) +{ + struct uhci_qh *pqh; - /* Link it into the schedule */ pqh = list_entry(qh->node.prev, struct uhci_qh, node); - qh->link = pqh->link; - wmb(); - pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle); + pqh->link = qh->link; + mb(); } /* - * Take a QH off the hardware schedule + * Unlink a period-1 interrupt or async QH from the schedule */ -static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) +static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct uhci_qh *pqh; + __hc32 link_to_next_qh = qh->link; + pqh = list_entry(qh->node.prev, struct uhci_qh, node); + pqh->link = link_to_next_qh; + + /* If this was the old first FSBR QH, link the terminating skeleton + * QH to the next (new first FSBR) QH. */ + if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR) + uhci->skel_term_qh->link = link_to_next_qh; + mb(); +} + +/* + * Take a QH off the hardware schedule + */ +static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) +{ if (qh->state == QH_STATE_UNLINKING) return; WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev); qh->state = QH_STATE_UNLINKING; /* Unlink the QH from the schedule and record when we did it */ - pqh = list_entry(qh->node.prev, struct uhci_qh, node); - pqh->link = qh->link; - mb(); + if (qh->skel == SKEL_ISO) + ; + else if (qh->skel < SKEL_ASYNC) + unlink_interrupt(uhci, qh); + else + unlink_async(uhci, qh); uhci_get_current_frame_number(uhci); qh->unlink_frame = uhci->frame_number; /* Force an interrupt so we know when the QH is fully unlinked */ - if (list_empty(&uhci->skel_unlink_qh->node)) + if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped) uhci_set_next_interrupt(uhci); /* Move the QH from its old list to the end of the unlinking list */ @@ -370,102 +594,172 @@ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh) list_move(&qh->node, &uhci->idle_qh_list); qh->state = QH_STATE_IDLE; + /* Now that the QH is idle, its post_td isn't being used */ + if (qh->post_td) { + uhci_free_td(uhci, qh->post_td); + qh->post_td = NULL; + } + /* If anyone is waiting for a QH to become idle, wake them up */ if (uhci->num_waiting) wake_up_all(&uhci->waitqh); } +/* + * Find the highest existing bandwidth load for a given phase and period. 
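/*
 * [Editor's illustration -- not part of the patch.]  The minimax phase
 * search that uhci_check_bandwidth() performs, shown standalone: for
 * each candidate phase, take the worst-case load over the frames that
 * phase hits, then choose the phase whose worst case is smallest.  The
 * driver then rejects the QH if that load plus the QH's own load would
 * exceed 900 us (90% of a 1 ms frame).
 */
static int highest_load(const int *load, int phase, int period, int nframes)
{
	int worst = load[phase];

	for (phase += period; phase < nframes; phase += period)
		if (load[phase] > worst)
			worst = load[phase];
	return worst;
}

static int best_phase(const int *load, int period, int nframes)
{
	int phase, w, best = 0;
	int minimax = highest_load(load, 0, period, nframes);

	for (phase = 1; phase < period && phase < nframes; ++phase) {
		w = highest_load(load, phase, period, nframes);
		if (w < minimax) {
			minimax = w;
			best = phase;
		}
	}
	return best;
}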
+ */ +static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period) +{ + int highest_load = uhci->load[phase]; + + for (phase += period; phase < MAX_PHASE; phase += period) + highest_load = max_t(int, highest_load, uhci->load[phase]); + return highest_load; +} + +/* + * Set qh->phase to the optimal phase for a periodic transfer and + * check whether the bandwidth requirement is acceptable. + */ +static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh) +{ + int minimax_load; + + /* Find the optimal phase (unless it is already set) and get + * its load value. */ + if (qh->phase >= 0) + minimax_load = uhci_highest_load(uhci, qh->phase, qh->period); + else { + int phase, load; + int max_phase = min_t(int, MAX_PHASE, qh->period); + + qh->phase = 0; + minimax_load = uhci_highest_load(uhci, qh->phase, qh->period); + for (phase = 1; phase < max_phase; ++phase) { + load = uhci_highest_load(uhci, phase, qh->period); + if (load < minimax_load) { + minimax_load = load; + qh->phase = phase; + } + } + } + + /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */ + if (minimax_load + qh->load > 900) { + dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: " + "period %d, phase %d, %d + %d us\n", + qh->period, qh->phase, minimax_load, qh->load); + return -ENOSPC; + } + return 0; +} + +/* + * Reserve a periodic QH's bandwidth in the schedule + */ +static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh) +{ + int i; + int load = qh->load; + char *p = "??"; + + for (i = qh->phase; i < MAX_PHASE; i += qh->period) { + uhci->load[i] += load; + uhci->total_load += load; + } + uhci_to_hcd(uhci)->self.bandwidth_allocated = + uhci->total_load / MAX_PHASE; + switch (qh->type) { + case USB_ENDPOINT_XFER_INT: + ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs; + p = "INT"; + break; + case USB_ENDPOINT_XFER_ISOC: + ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs; + p = "ISO"; + break; + } + qh->bandwidth_reserved = 1; + dev_dbg(uhci_dev(uhci), + "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n", + "reserve", qh->udev->devnum, + qh->hep->desc.bEndpointAddress, p, + qh->period, qh->phase, load); +} + +/* + * Release a periodic QH's bandwidth reservation + */ +static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh) +{ + int i; + int load = qh->load; + char *p = "??"; + + for (i = qh->phase; i < MAX_PHASE; i += qh->period) { + uhci->load[i] -= load; + uhci->total_load -= load; + } + uhci_to_hcd(uhci)->self.bandwidth_allocated = + uhci->total_load / MAX_PHASE; + switch (qh->type) { + case USB_ENDPOINT_XFER_INT: + --uhci_to_hcd(uhci)->self.bandwidth_int_reqs; + p = "INT"; + break; + case USB_ENDPOINT_XFER_ISOC: + --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs; + p = "ISO"; + break; + } + qh->bandwidth_reserved = 0; + dev_dbg(uhci_dev(uhci), + "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n", + "release", qh->udev->devnum, + qh->hep->desc.bEndpointAddress, p, + qh->period, qh->phase, load); +} + static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb) { struct urb_priv *urbp; - urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC); + urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC); if (!urbp) return NULL; - memset((void *)urbp, 0, sizeof(*urbp)); - urbp->urb = urb; urb->hcpriv = urbp; - + INIT_LIST_HEAD(&urbp->node); INIT_LIST_HEAD(&urbp->td_list); return urbp; } -static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td) -{ - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - - 
list_add_tail(&td->list, &urbp->td_list); -} - -static void uhci_remove_td_from_urb(struct uhci_td *td) -{ - if (list_empty(&td->list)) - return; - - list_del_init(&td->list); -} - static void uhci_free_urb_priv(struct uhci_hcd *uhci, struct urb_priv *urbp) { struct uhci_td *td, *tmp; if (!list_empty(&urbp->node)) - dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n", + dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n", urbp->urb); - uhci_get_current_frame_number(uhci); - if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) { - uhci_free_pending_tds(uhci); - uhci->td_remove_age = uhci->frame_number; - } - - /* Check to see if the remove list is empty. Set the IOC bit */ - /* to force an interrupt so we can remove the TDs. */ - if (list_empty(&uhci->td_remove_list)) - uhci_set_next_interrupt(uhci); - list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { - uhci_remove_td_from_urb(td); - list_add(&td->remove_list, &uhci->td_remove_list); + uhci_remove_td_from_urbp(td); + uhci_free_td(uhci, td); } - urbp->urb->hcpriv = NULL; kmem_cache_free(uhci_up_cachep, urbp); } -static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb) -{ - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - - if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) { - urbp->fsbr = 1; - if (!uhci->fsbr++ && !uhci->fsbrtimeout) - uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH; - } -} - -static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb) -{ - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - - if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) { - urbp->fsbr = 0; - if (!--uhci->fsbr) - uhci->fsbrtimeout = jiffies + FSBR_DELAY; - } -} - /* * Map status to standard result codes * - * <status> is (td_status(td) & 0xF60000), a.k.a. - * uhci_status_bits(td_status(td)). + * <status> is (td_status(uhci, td) & 0xF60000), a.k.a. + * uhci_status_bits(td_status(uhci, td)). * Note: <status> does not include the TD_CTRL_NAK bit. * <dir_out> is True for output TDs and False for input TDs. */ @@ -487,7 +781,6 @@ static int uhci_map_status(int status, int dir_out) return -ENOSR; if (status & TD_CTRL_STALLED) /* Stalled */ return -EPIPE; - WARN_ON(status & TD_CTRL_ACTIVE); /* Active */ return 0; } @@ -499,10 +792,12 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, { struct uhci_td *td; unsigned long destination, status; - int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); + int maxsze = usb_endpoint_maxp(&qh->hep->desc); int len = urb->transfer_buffer_length; dma_addr_t data = urb->transfer_dma; - __le32 *plink; + __hc32 *plink; + struct urb_priv *urbp = urb->hcpriv; + int skel; /* The "pipe" thing contains the destination in bits 8--18 */ destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP; @@ -516,8 +811,8 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, * Build the TD for the control request setup packet */ td = qh->dummy_td; - uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status, destination | uhci_explen(8), + uhci_add_td_to_urbp(td, urbp); + uhci_fill_td(uhci, td, status, destination | uhci_explen(8), urb->setup_dma); plink = &td->link; status |= TD_CTRL_ACTIVE; @@ -526,8 +821,10 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, * If direction is "send", change the packet ID from SETUP (0x2D) * to OUT (0xE1). Else change it from SETUP to IN (0x69) and * set Short Packet Detect (SPD) for all data packets. 
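/*
 * [Editor's illustration -- not part of the patch.]  How the PID XOR
 * trick used in uhci_submit_control() works.  The token PIDs are the
 * real USB values; XORing with (old_pid ^ new_pid) rewrites one PID
 * into the other without disturbing the rest of the token word.
 */
enum { PID_SETUP = 0x2d, PID_IN = 0x69, PID_OUT = 0xe1 };

static int data_stage_pid(int is_out)
{
	/* SETUP -> OUT for sends, SETUP -> IN for reads */
	return PID_SETUP ^ (is_out ? (PID_SETUP ^ PID_OUT)
				   : (PID_SETUP ^ PID_IN));
}

static int status_stage_pid(int data_pid)
{
	/* The status packet runs opposite to the data direction */
	return data_pid ^ (PID_IN ^ PID_OUT);
}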
+ * + * 0-length transfers always get treated as "send". */ - if (usb_pipeout(urb->pipe)) + if (usb_pipeout(urb->pipe) || len == 0) destination ^= (USB_PID_SETUP ^ USB_PID_OUT); else { destination ^= (USB_PID_SETUP ^ USB_PID_IN); @@ -538,19 +835,24 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, * Build the DATA TDs */ while (len > 0) { - int pktsze = min(len, maxsze); + int pktsze = maxsze; + + if (len <= pktsze) { /* The last data packet */ + pktsze = len; + status &= ~TD_CTRL_SPD; + } td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = cpu_to_le32(td->dma_handle); + *plink = LINK_TO_TD(uhci, td); /* Alternate Data0/1 (start with Data1) */ destination ^= TD_TOKEN_TOGGLE; - - uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status, destination | uhci_explen(pktsze), - data); + + uhci_add_td_to_urbp(td, urbp); + uhci_fill_td(uhci, td, status, + destination | uhci_explen(pktsze), data); plink = &td->link; data += pktsze; @@ -558,29 +860,19 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, } /* - * Build the final TD for control status + * Build the final TD for control status */ td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = cpu_to_le32(td->dma_handle); - - /* - * It's IN if the pipe is an output pipe or we're not expecting - * data back. - */ - destination &= ~TD_TOKEN_PID_MASK; - if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length) - destination |= USB_PID_IN; - else - destination |= USB_PID_OUT; + *plink = LINK_TO_TD(uhci, td); + /* Change direction for the status transaction */ + destination ^= (USB_PID_IN ^ USB_PID_OUT); destination |= TD_TOKEN_TOGGLE; /* End in Data1 */ - status &= ~TD_CTRL_SPD; - - uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status | TD_CTRL_IOC, + uhci_add_td_to_urbp(td, urbp); + uhci_fill_td(uhci, td, status | TD_CTRL_IOC, destination | uhci_explen(0), 0); plink = &td->link; @@ -590,11 +882,11 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = cpu_to_le32(td->dma_handle); + *plink = LINK_TO_TD(uhci, td); - uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); + uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0); wmb(); - qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE); + qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE); qh->dummy_td = td; /* Low-speed transfers get a different queue, and won't hog the bus. @@ -603,148 +895,22 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, * isn't in the CONFIGURED state. */ if (urb->dev->speed == USB_SPEED_LOW || urb->dev->state != USB_STATE_CONFIGURED) - qh->skel = uhci->skel_ls_control_qh; + skel = SKEL_LS_CONTROL; else { - qh->skel = uhci->skel_fs_control_qh; - uhci_inc_fsbr(uhci, urb); + skel = SKEL_FS_CONTROL; + uhci_add_fsbr(uhci, urb); } + if (qh->state != QH_STATE_ACTIVE) + qh->skel = skel; return 0; nomem: /* Remove the dummy TD from the td_list so it doesn't get freed */ - uhci_remove_td_from_urb(qh->dummy_td); + uhci_remove_td_from_urbp(qh->dummy_td); return -ENOMEM; } /* - * If control-IN transfer was short, the status packet wasn't sent. - * This routine changes the element pointer in the QH to point at the - * status TD. It's safe to do this even while the QH is live, because - * the hardware only updates the element pointer following a successful - * transfer. The inactive TD for the short packet won't cause an update, - * so the pointer won't get overwritten. 
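/*
 * [Editor's illustration -- not part of the patch.]  The data-stage
 * chunking above, reduced to arithmetic: a transfer of len bytes with
 * endpoint maximum maxsze becomes ceil(len / maxsze) data TDs, and
 * only the last one has short-packet detect cleared.
 */
#include <stddef.h>

static size_t count_data_tds(size_t len, size_t maxsze)
{
	size_t n = 0;

	while (len > 0) {
		size_t pktsze = len < maxsze ? len : maxsze;

		++n;		/* the driver also clears TD_CTRL_SPD on
				 * the final packet (pktsze == len) */
		len -= pktsze;
	}
	return n;		/* e.g. len = 18, maxsze = 8 -> 3 TDs */
}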
The next time the controller - * sees this QH, it will send the status packet. - */ -static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb) -{ - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - struct uhci_td *td; - - urbp->short_transfer = 1; - - td = list_entry(urbp->td_list.prev, struct uhci_td, list); - urbp->qh->element = cpu_to_le32(td->dma_handle); - - return -EINPROGRESS; -} - - -static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb) -{ - struct list_head *tmp, *head; - struct urb_priv *urbp = urb->hcpriv; - struct uhci_td *td; - unsigned int status; - int ret = 0; - - head = &urbp->td_list; - if (urbp->short_transfer) { - tmp = head->prev; - goto status_stage; - } - - urb->actual_length = 0; - - tmp = head->next; - td = list_entry(tmp, struct uhci_td, list); - - /* The first TD is the SETUP stage, check the status, but skip */ - /* the count */ - status = uhci_status_bits(td_status(td)); - if (status & TD_CTRL_ACTIVE) - return -EINPROGRESS; - - if (status) - goto td_error; - - /* The rest of the TDs (but the last) are data */ - tmp = tmp->next; - while (tmp != head && tmp->next != head) { - unsigned int ctrlstat; - - td = list_entry(tmp, struct uhci_td, list); - tmp = tmp->next; - - ctrlstat = td_status(td); - status = uhci_status_bits(ctrlstat); - if (status & TD_CTRL_ACTIVE) - return -EINPROGRESS; - - urb->actual_length += uhci_actual_length(ctrlstat); - - if (status) - goto td_error; - - /* Check to see if we received a short packet */ - if (uhci_actual_length(ctrlstat) < - uhci_expected_length(td_token(td))) { - if (urb->transfer_flags & URB_SHORT_NOT_OK) { - ret = -EREMOTEIO; - goto err; - } - - return usb_control_retrigger_status(uhci, urb); - } - } - -status_stage: - td = list_entry(tmp, struct uhci_td, list); - - /* Control status stage */ - status = td_status(td); - -#ifdef I_HAVE_BUGGY_APC_BACKUPS - /* APC BackUPS Pro kludge */ - /* It tries to send all of the descriptor instead of the amount */ - /* we requested */ - if (status & TD_CTRL_IOC && /* IOC is masked out by uhci_status_bits */ - status & TD_CTRL_ACTIVE && - status & TD_CTRL_NAK) - return 0; -#endif - - status = uhci_status_bits(status); - if (status & TD_CTRL_ACTIVE) - return -EINPROGRESS; - - if (status) - goto td_error; - - return 0; - -td_error: - ret = uhci_map_status(status, uhci_packetout(td_token(td))); - -err: - if ((debug == 1 && ret != -EPIPE) || debug > 1) { - /* Some debugging code */ - dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n", - __FUNCTION__, status); - - if (errbuf) { - /* Print the chain for debugging purposes */ - uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0); - lprintk(errbuf); - } - } - - /* Note that the queue has stopped */ - urbp->qh->element = UHCI_PTR_TERM; - urbp->qh->is_stopped = 1; - return ret; -} - -/* * Common submit for bulk and interrupt */ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, @@ -752,11 +918,15 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, { struct uhci_td *td; unsigned long destination, status; - int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); + int maxsze = usb_endpoint_maxp(&qh->hep->desc); int len = urb->transfer_buffer_length; - dma_addr_t data = urb->transfer_dma; - __le32 *plink; + int this_sg_len; + dma_addr_t data; + __hc32 *plink; + struct urb_priv *urbp = urb->hcpriv; unsigned int toggle; + struct scatterlist *sg; + int i; if (len < 0) return -EINVAL; @@ -773,12 +943,26 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb 
*urb, if (usb_pipein(urb->pipe)) status |= TD_CTRL_SPD; + i = urb->num_mapped_sgs; + if (len > 0 && i > 0) { + sg = urb->sg; + data = sg_dma_address(sg); + + /* urb->transfer_buffer_length may be smaller than the + * size of the scatterlist (or vice versa) + */ + this_sg_len = min_t(int, sg_dma_len(sg), len); + } else { + sg = NULL; + data = urb->transfer_dma; + this_sg_len = len; + } /* * Build the DATA TDs */ plink = NULL; td = qh->dummy_td; - do { /* Allow zero length packets */ + for (;;) { /* Allow zero length packets */ int pktsze = maxsze; if (len <= pktsze) { /* The last packet */ @@ -791,20 +975,28 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = cpu_to_le32(td->dma_handle); + *plink = LINK_TO_TD(uhci, td); } - uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status, + uhci_add_td_to_urbp(td, urbp); + uhci_fill_td(uhci, td, status, destination | uhci_explen(pktsze) | (toggle << TD_TOKEN_TOGGLE_SHIFT), data); plink = &td->link; status |= TD_CTRL_ACTIVE; + toggle ^= 1; data += pktsze; + this_sg_len -= pktsze; len -= maxsze; - toggle ^= 1; - } while (len > 0); + if (this_sg_len <= 0) { + if (--i <= 0 || len <= 0) + break; + sg = sg_next(sg); + data = sg_dma_address(sg); + this_sg_len = min_t(int, sg_dma_len(sg), len); + } + } /* * URB_ZERO_PACKET means adding a 0-length packet, if direction @@ -819,10 +1011,10 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = cpu_to_le32(td->dma_handle); + *plink = LINK_TO_TD(uhci, td); - uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status, + uhci_add_td_to_urbp(td, urbp); + uhci_fill_td(uhci, td, status, destination | uhci_explen(0) | (toggle << TD_TOKEN_TOGGLE_SHIFT), data); @@ -837,7 +1029,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, * fast side but not enough to justify delaying an interrupt * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT * flag setting. 
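/*
 * [Editor's illustration -- not part of the patch.]  The scatter-gather
 * bookkeeping added above, modeled with a plain array instead of
 * struct scatterlist: emit maxpacket-sized chunks, switch to the next
 * entry when the current one is exhausted, and never exceed the URB's
 * total transfer length (the two lengths may disagree, as the comment
 * in the patch notes).
 */
#include <stddef.h>

struct sg_sketch { size_t len; };	/* hypothetical stand-in */

static size_t count_sg_tds(const struct sg_sketch *sg, int nsg,
			   size_t total, size_t maxsze)
{
	size_t ntds = 0;
	int i = 0;
	size_t this_len = nsg ? (sg[0].len < total ? sg[0].len : total)
			      : total;

	for (;;) {	/* allow zero-length packets, like the driver */
		size_t pkt = this_len < maxsze ? this_len : maxsze;

		++ntds;
		this_len -= pkt;
		total -= pkt;
		if (this_len > 0)
			continue;
		if (--nsg <= 0 || total == 0)
			break;
		++i;
		this_len = sg[i].len < total ? sg[i].len : total;
	}
	return ntds;
}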
*/ - td->status |= __constant_cpu_to_le32(TD_CTRL_IOC); + td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); /* * Build the new dummy TD and activate the old one @@ -845,11 +1037,11 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = cpu_to_le32(td->dma_handle); + *plink = LINK_TO_TD(uhci, td); - uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); + uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0); wmb(); - qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE); + qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE); qh->dummy_td = td; usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), @@ -858,115 +1050,203 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, nomem: /* Remove the dummy TD from the td_list so it doesn't get freed */ - uhci_remove_td_from_urb(qh->dummy_td); + uhci_remove_td_from_urbp(qh->dummy_td); return -ENOMEM; } -/* - * Common result for bulk and interrupt - */ -static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) +static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, + struct uhci_qh *qh) { - struct urb_priv *urbp = urb->hcpriv; - struct uhci_td *td; - unsigned int status = 0; - int ret = 0; + int ret; - urb->actual_length = 0; + /* Can't have low-speed bulk transfers */ + if (urb->dev->speed == USB_SPEED_LOW) + return -EINVAL; - list_for_each_entry(td, &urbp->td_list, list) { - unsigned int ctrlstat = td_status(td); + if (qh->state != QH_STATE_ACTIVE) + qh->skel = SKEL_BULK; + ret = uhci_submit_common(uhci, urb, qh); + if (ret == 0) + uhci_add_fsbr(uhci, urb); + return ret; +} - status = uhci_status_bits(ctrlstat); - if (status & TD_CTRL_ACTIVE) - return -EINPROGRESS; +static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, + struct uhci_qh *qh) +{ + int ret; + + /* USB 1.1 interrupt transfers only involve one packet per interval. + * Drivers can submit URBs of any length, but longer ones will need + * multiple intervals to complete. + */ - urb->actual_length += uhci_actual_length(ctrlstat); + if (!qh->bandwidth_reserved) { + int exponent; - if (status) - goto td_error; + /* Figure out which power-of-two queue to use */ + for (exponent = 7; exponent >= 0; --exponent) { + if ((1 << exponent) <= urb->interval) + break; + } + if (exponent < 0) + return -EINVAL; - if (uhci_actual_length(ctrlstat) < - uhci_expected_length(td_token(td))) { - if (urb->transfer_flags & URB_SHORT_NOT_OK) { - ret = -EREMOTEIO; - goto err; - } + /* If the slot is full, try a lower period */ + do { + qh->period = 1 << exponent; + qh->skel = SKEL_INDEX(exponent); - /* - * This URB stopped short of its end. We have to - * fix up the toggles of the following URBs on the - * queue and restart the queue. - * - * Do this only the first time we encounter the - * short URB. + /* For now, interrupt phase is fixed by the layout + * of the QH lists. 
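/*
 * [Editor's illustration -- not part of the patch.]  The exponent loop
 * added in uhci_submit_interrupt(): UHCI keeps interrupt QHs on
 * power-of-two period lists, so an URB's interval is rounded down to a
 * power of two between 1 and 128 frames.
 */
static int interval_to_exponent(int interval)
{
	int exponent;

	for (exponent = 7; exponent >= 0; --exponent)
		if ((1 << exponent) <= interval)
			break;
	return exponent;	/* interval 10 -> 3 (period 8); 0 -> -1 */
}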
*/ - if (!urbp->short_transfer) { - urbp->short_transfer = 1; - urbp->qh->initial_toggle = - uhci_toggle(td_token(td)) ^ 1; - uhci_fixup_toggles(urbp->qh, 1); - - td = list_entry(urbp->td_list.prev, - struct uhci_td, list); - urbp->qh->element = td->link; - } - break; - } + qh->phase = (qh->period / 2) & (MAX_PHASE - 1); + ret = uhci_check_bandwidth(uhci, qh); + } while (ret != 0 && --exponent >= 0); + if (ret) + return ret; + } else if (qh->period > urb->interval) + return -EINVAL; /* Can't decrease the period */ + + ret = uhci_submit_common(uhci, urb, qh); + if (ret == 0) { + urb->interval = qh->period; + if (!qh->bandwidth_reserved) + uhci_reserve_bandwidth(uhci, qh); } + return ret; +} - return 0; +/* + * Fix up the data structures following a short transfer + */ +static int uhci_fixup_short_transfer(struct uhci_hcd *uhci, + struct uhci_qh *qh, struct urb_priv *urbp) +{ + struct uhci_td *td; + struct list_head *tmp; + int ret; + + td = list_entry(urbp->td_list.prev, struct uhci_td, list); + if (qh->type == USB_ENDPOINT_XFER_CONTROL) { -td_error: - ret = uhci_map_status(status, uhci_packetout(td_token(td))); + /* When a control transfer is short, we have to restart + * the queue at the status stage transaction, which is + * the last TD. */ + WARN_ON(list_empty(&urbp->td_list)); + qh->element = LINK_TO_TD(uhci, td); + tmp = td->list.prev; + ret = -EINPROGRESS; - if ((debug == 1 && ret != -EPIPE) || debug > 1) { - /* Some debugging code */ - dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n", - __FUNCTION__, status); + } else { - if (debug > 1 && errbuf) { - /* Print the chain for debugging purposes */ - uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0); - lprintk(errbuf); - } + /* When a bulk/interrupt transfer is short, we have to + * fix up the toggles of the following URBs on the queue + * before restarting the queue at the next URB. 
*/ + qh->initial_toggle = + uhci_toggle(td_token(uhci, qh->post_td)) ^ 1; + uhci_fixup_toggles(uhci, qh, 1); + + if (list_empty(&urbp->td_list)) + td = qh->post_td; + qh->element = td->link; + tmp = urbp->td_list.prev; + ret = 0; } -err: - /* Note that the queue has stopped and save the next toggle value */ - urbp->qh->element = UHCI_PTR_TERM; - urbp->qh->is_stopped = 1; - urbp->qh->needs_fixup = 1; - urbp->qh->initial_toggle = uhci_toggle(td_token(td)) ^ - (ret == -EREMOTEIO); + /* Remove all the TDs we skipped over, from tmp back to the start */ + while (tmp != &urbp->td_list) { + td = list_entry(tmp, struct uhci_td, list); + tmp = tmp->prev; + + uhci_remove_td_from_urbp(td); + uhci_free_td(uhci, td); + } return ret; } -static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, - struct uhci_qh *qh) +/* + * Common result for control, bulk, and interrupt + */ +static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) { - int ret; + struct urb_priv *urbp = urb->hcpriv; + struct uhci_qh *qh = urbp->qh; + struct uhci_td *td, *tmp; + unsigned status; + int ret = 0; - /* Can't have low-speed bulk transfers */ - if (urb->dev->speed == USB_SPEED_LOW) - return -EINVAL; + list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { + unsigned int ctrlstat; + int len; - qh->skel = uhci->skel_bulk_qh; - ret = uhci_submit_common(uhci, urb, qh); - if (ret == 0) - uhci_inc_fsbr(uhci, urb); + ctrlstat = td_status(uhci, td); + status = uhci_status_bits(ctrlstat); + if (status & TD_CTRL_ACTIVE) + return -EINPROGRESS; + + len = uhci_actual_length(ctrlstat); + urb->actual_length += len; + + if (status) { + ret = uhci_map_status(status, + uhci_packetout(td_token(uhci, td))); + if ((debug == 1 && ret != -EPIPE) || debug > 1) { + /* Some debugging code */ + dev_dbg(&urb->dev->dev, + "%s: failed with status %x\n", + __func__, status); + + if (debug > 1 && errbuf) { + /* Print the chain for debugging */ + uhci_show_qh(uhci, urbp->qh, errbuf, + ERRBUF_LEN - EXTRA_SPACE, 0); + lprintk(errbuf); + } + } + + /* Did we receive a short packet? */ + } else if (len < uhci_expected_length(td_token(uhci, td))) { + + /* For control transfers, go to the status TD if + * this isn't already the last data TD */ + if (qh->type == USB_ENDPOINT_XFER_CONTROL) { + if (td->list.next != urbp->td_list.prev) + ret = 1; + } + + /* For bulk and interrupt, this may be an error */ + else if (urb->transfer_flags & URB_SHORT_NOT_OK) + ret = -EREMOTEIO; + + /* Fixup needed only if this isn't the URB's last TD */ + else if (&td->list != urbp->td_list.prev) + ret = 1; + } + + uhci_remove_td_from_urbp(td); + if (qh->post_td) + uhci_free_td(uhci, qh->post_td); + qh->post_td = td; + + if (ret != 0) + goto err; + } return ret; -} -static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, - struct uhci_qh *qh) -{ - /* USB 1.1 interrupt transfers only involve one packet per interval. - * Drivers can submit URBs of any length, but longer ones will need - * multiple intervals to complete. 
- */ - qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)]; - return uhci_submit_common(uhci, urb, qh); +err: + if (ret < 0) { + /* Note that the queue has stopped and save + * the next toggle value */ + qh->element = UHCI_PTR_TERM(uhci); + qh->is_stopped = 1; + qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL); + qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^ + (ret == -EREMOTEIO); + + } else /* Short packet received */ + ret = uhci_fixup_short_transfer(uhci, qh, urbp); + return ret; } /* @@ -976,100 +1256,157 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, struct uhci_qh *qh) { struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */ - int i, frame; + int i; + unsigned frame, next; unsigned long destination, status; struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; - if (urb->number_of_packets > 900) /* 900? Why? */ + /* Values must not be too big (could overflow below) */ + if (urb->interval >= UHCI_NUMFRAMES || + urb->number_of_packets >= UHCI_NUMFRAMES) return -EFBIG; - status = TD_CTRL_ACTIVE | TD_CTRL_IOS; - destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); + uhci_get_current_frame_number(uhci); - /* Figure out the starting frame number */ - if (urb->transfer_flags & URB_ISO_ASAP) { - if (list_empty(&qh->queue)) { - uhci_get_current_frame_number(uhci); - urb->start_frame = (uhci->frame_number + 10); + /* Check the period and figure out the starting frame number */ + if (!qh->bandwidth_reserved) { + qh->period = urb->interval; + qh->phase = -1; /* Find the best phase */ + i = uhci_check_bandwidth(uhci, qh); + if (i) + return i; + + /* Allow a little time to allocate the TDs */ + next = uhci->frame_number + 10; + frame = qh->phase; + + /* Round up to the first available slot */ + frame += (next - frame + qh->period - 1) & -qh->period; + + } else if (qh->period != urb->interval) { + return -EINVAL; /* Can't change the period */ - } else { /* Go right after the last one */ - struct urb *last_urb; + } else { + next = uhci->frame_number + 1; + + /* Find the next unused frame */ + if (list_empty(&qh->queue)) { + frame = qh->iso_frame; + } else { + struct urb *lurb; - last_urb = list_entry(qh->queue.prev, + lurb = list_entry(qh->queue.prev, struct urb_priv, node)->urb; - urb->start_frame = (last_urb->start_frame + - last_urb->number_of_packets * - last_urb->interval); + frame = lurb->start_frame + + lurb->number_of_packets * + lurb->interval; + } + + /* Fell behind? */ + if (!uhci_frame_before_eq(next, frame)) { + + /* USB_ISO_ASAP: Round up to the first available slot */ + if (urb->transfer_flags & URB_ISO_ASAP) + frame += (next - frame + qh->period - 1) & + -qh->period; + + /* + * Not ASAP: Use the next slot in the stream, + * no matter what. 
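/*
 * [Editor's illustration -- not part of the patch.]  The frame rounding
 * used in uhci_submit_isochronous(): with a power-of-two period,
 * (delta + period - 1) & -period rounds delta up to the next multiple
 * of the period, advancing "frame" to the first slot in the stream
 * that isn't already in the past.
 */
static unsigned round_up_to_slot(unsigned frame, unsigned next,
				 unsigned period /* power of two */)
{
	return frame + ((next - frame + period - 1) & -period);
}
/* round_up_to_slot(4, 13, 8) == 20: slots 4 and 12 precede frame 13,
 * so the first usable slot is 20. */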
+ */ + else if (!uhci_frame_before_eq(next, + frame + (urb->number_of_packets - 1) * + qh->period)) + dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n", + urb, frame, + (urb->number_of_packets - 1) * + qh->period, + next); } - } else { - /* FIXME: Sanity check */ } - urb->start_frame &= (UHCI_NUMFRAMES - 1); + + /* Make sure we won't have to go too far into the future */ + if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES, + frame + urb->number_of_packets * urb->interval)) + return -EFBIG; + urb->start_frame = frame; + + status = TD_CTRL_ACTIVE | TD_CTRL_IOS; + destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); for (i = 0; i < urb->number_of_packets; i++) { td = uhci_alloc_td(uhci); if (!td) return -ENOMEM; - uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status, destination | + uhci_add_td_to_urbp(td, urbp); + uhci_fill_td(uhci, td, status, destination | uhci_explen(urb->iso_frame_desc[i].length), urb->transfer_dma + urb->iso_frame_desc[i].offset); } /* Set the interrupt-on-completion flag on the last packet. */ - td->status |= __constant_cpu_to_le32(TD_CTRL_IOC); - - qh->skel = uhci->skel_iso_qh; + td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); /* Add the TDs to the frame list */ frame = urb->start_frame; list_for_each_entry(td, &urbp->td_list, list) { uhci_insert_td_in_frame_list(uhci, td, frame); - frame += urb->interval; + frame += qh->period; + } + + if (list_empty(&qh->queue)) { + qh->iso_packet_desc = &urb->iso_frame_desc[0]; + qh->iso_frame = urb->start_frame; } + qh->skel = SKEL_ISO; + if (!qh->bandwidth_reserved) + uhci_reserve_bandwidth(uhci, qh); return 0; } static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb) { - struct uhci_td *td; - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - int status; - int i, ret = 0; - - urb->actual_length = urb->error_count = 0; + struct uhci_td *td, *tmp; + struct urb_priv *urbp = urb->hcpriv; + struct uhci_qh *qh = urbp->qh; - i = 0; - list_for_each_entry(td, &urbp->td_list, list) { + list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { + unsigned int ctrlstat; + int status; int actlength; - unsigned int ctrlstat = td_status(td); - if (ctrlstat & TD_CTRL_ACTIVE) + if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame)) return -EINPROGRESS; - actlength = uhci_actual_length(ctrlstat); - urb->iso_frame_desc[i].actual_length = actlength; - urb->actual_length += actlength; + uhci_remove_tds_from_frame(uhci, qh->iso_frame); - status = uhci_map_status(uhci_status_bits(ctrlstat), - usb_pipeout(urb->pipe)); - urb->iso_frame_desc[i].status = status; - if (status) { - urb->error_count++; - ret = status; + ctrlstat = td_status(uhci, td); + if (ctrlstat & TD_CTRL_ACTIVE) { + status = -EXDEV; /* TD was added too late? 
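/*
 * [Editor's illustration -- not part of the patch.]  Frame counters
 * wrap, so "before or equal" comparisons like the ones above are made
 * on the sign of the difference rather than on the raw values; the
 * driver's uhci_frame_before_eq() helper is built the same way.
 */
static int frame_before_eq(unsigned int frame1, unsigned int frame2)
{
	return (int) (frame2 - frame1) >= 0;
}
/* frame_before_eq(0xfffffff0u, 8u) is true: 8 lies after the wrap. */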
*/ + } else { + status = uhci_map_status(uhci_status_bits(ctrlstat), + usb_pipeout(urb->pipe)); + actlength = uhci_actual_length(ctrlstat); + + urb->actual_length += actlength; + qh->iso_packet_desc->actual_length = actlength; + qh->iso_packet_desc->status = status; } + if (status) + urb->error_count++; - i++; + uhci_remove_td_from_urbp(td); + uhci_free_td(uhci, td); + qh->iso_frame += qh->period; + ++qh->iso_packet_desc; } - - return ret; + return 0; } static int uhci_urb_enqueue(struct usb_hcd *hcd, - struct usb_host_endpoint *hep, struct urb *urb, gfp_t mem_flags) { int ret; @@ -1077,139 +1414,131 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd, unsigned long flags; struct urb_priv *urbp; struct uhci_qh *qh; - int bustime; spin_lock_irqsave(&uhci->lock, flags); - ret = urb->status; - if (ret != -EINPROGRESS) /* URB already unlinked! */ - goto done; + ret = usb_hcd_link_urb_to_ep(hcd, urb); + if (ret) + goto done_not_linked; ret = -ENOMEM; urbp = uhci_alloc_urb_priv(uhci, urb); if (!urbp) goto done; - if (hep->hcpriv) - qh = (struct uhci_qh *) hep->hcpriv; + if (urb->ep->hcpriv) + qh = urb->ep->hcpriv; else { - qh = uhci_alloc_qh(uhci, urb->dev, hep); + qh = uhci_alloc_qh(uhci, urb->dev, urb->ep); if (!qh) goto err_no_qh; } urbp->qh = qh; - switch (usb_pipetype(urb->pipe)) { - case PIPE_CONTROL: + switch (qh->type) { + case USB_ENDPOINT_XFER_CONTROL: ret = uhci_submit_control(uhci, urb, qh); break; - case PIPE_BULK: + case USB_ENDPOINT_XFER_BULK: ret = uhci_submit_bulk(uhci, urb, qh); break; - case PIPE_INTERRUPT: - if (list_empty(&qh->queue)) { - bustime = usb_check_bandwidth(urb->dev, urb); - if (bustime < 0) - ret = bustime; - else { - ret = uhci_submit_interrupt(uhci, urb, qh); - if (ret == 0) - usb_claim_bandwidth(urb->dev, urb, bustime, 0); - } - } else { /* inherit from parent */ - struct urb_priv *eurbp; - - eurbp = list_entry(qh->queue.prev, struct urb_priv, - node); - urb->bandwidth = eurbp->urb->bandwidth; - ret = uhci_submit_interrupt(uhci, urb, qh); - } + case USB_ENDPOINT_XFER_INT: + ret = uhci_submit_interrupt(uhci, urb, qh); break; - case PIPE_ISOCHRONOUS: - bustime = usb_check_bandwidth(urb->dev, urb); - if (bustime < 0) { - ret = bustime; - break; - } - + case USB_ENDPOINT_XFER_ISOC: + urb->error_count = 0; ret = uhci_submit_isochronous(uhci, urb, qh); - if (ret == 0) - usb_claim_bandwidth(urb->dev, urb, bustime, 1); break; } if (ret != 0) goto err_submit_failed; /* Add this URB to the QH */ - urbp->qh = qh; list_add_tail(&urbp->node, &qh->queue); /* If the new URB is the first and only one on this QH then either * the QH is new and idle or else it's unlinked and waiting to - * become idle, so we can activate it right away. */ - if (qh->queue.next == &urbp->node) + * become idle, so we can activate it right away. But only if the + * queue isn't stopped. 
*/ + if (qh->queue.next == &urbp->node && !qh->is_stopped) { uhci_activate_qh(uhci, qh); + uhci_urbp_wants_fsbr(uhci, urbp); + } goto done; err_submit_failed: if (qh->state == QH_STATE_IDLE) uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */ - err_no_qh: uhci_free_urb_priv(uhci, urbp); - done: + if (ret) + usb_hcd_unlink_urb_from_ep(hcd, urb); +done_not_linked: spin_unlock_irqrestore(&uhci->lock, flags); return ret; } -static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) +static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); unsigned long flags; - struct urb_priv *urbp; + struct uhci_qh *qh; + int rc; spin_lock_irqsave(&uhci->lock, flags); - urbp = urb->hcpriv; - if (!urbp) /* URB was never linked! */ + rc = usb_hcd_check_unlink_urb(hcd, urb, status); + if (rc) goto done; + qh = ((struct urb_priv *) urb->hcpriv)->qh; + /* Remove Isochronous TDs from the frame list ASAP */ - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) + if (qh->type == USB_ENDPOINT_XFER_ISOC) { uhci_unlink_isochronous_tds(uhci, urb); - uhci_unlink_qh(uhci, urbp->qh); + mb(); + + /* If the URB has already started, update the QH unlink time */ + uhci_get_current_frame_number(uhci); + if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number)) + qh->unlink_frame = uhci->frame_number; + } + + uhci_unlink_qh(uhci, qh); done: spin_unlock_irqrestore(&uhci->lock, flags); - return 0; + return rc; } /* * Finish unlinking an URB and give it back */ static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh, - struct urb *urb, struct pt_regs *regs) + struct urb *urb, int status) __releases(uhci->lock) __acquires(uhci->lock) { struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; - /* Isochronous TDs get unlinked directly from the frame list */ - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) - uhci_unlink_isochronous_tds(uhci, urb); + if (qh->type == USB_ENDPOINT_XFER_CONTROL) { - /* If the URB isn't first on its queue, adjust the link pointer - * of the last TD in the previous URB. */ - else if (qh->queue.next != &urbp->node) { - struct urb_priv *purbp; - struct uhci_td *ptd, *ltd; + /* Subtract off the length of the SETUP packet from + * urb->actual_length. + */ + urb->actual_length -= min_t(u32, 8, urb->actual_length); + } - purbp = list_entry(urbp->node.prev, struct urb_priv, node); - ptd = list_entry(purbp->td_list.prev, struct uhci_td, - list); - ltd = list_entry(urbp->td_list.prev, struct uhci_td, - list); - ptd->link = ltd->link; + /* When giving back the first URB in an Isochronous queue, + * reinitialize the QH's iso-related members for the next URB. */ + else if (qh->type == USB_ENDPOINT_XFER_ISOC && + urbp->node.prev == &qh->queue && + urbp->node.next != &qh->queue) { + struct urb *nurb = list_entry(urbp->node.next, + struct urb_priv, node)->urb; + + qh->iso_packet_desc = &nurb->iso_frame_desc[0]; + qh->iso_frame = nurb->start_frame; } /* Take the URB off the QH's queue. If the queue is now empty, @@ -1221,37 +1550,19 @@ __acquires(uhci->lock) qh->needs_fixup = 0; } - uhci_dec_fsbr(uhci, urb); /* Safe since it checks */ uhci_free_urb_priv(uhci, urbp); - - switch (usb_pipetype(urb->pipe)) { - case PIPE_ISOCHRONOUS: - /* Release bandwidth for Interrupt or Isoc. transfers */ - if (urb->bandwidth) - usb_release_bandwidth(urb->dev, urb, 1); - break; - case PIPE_INTERRUPT: - /* Release bandwidth for Interrupt or Isoc. 
transfers */ - /* Make sure we don't release if we have a queued URB */ - if (list_empty(&qh->queue) && urb->bandwidth) - usb_release_bandwidth(urb->dev, urb, 0); - else - /* bandwidth was passed on to queued URB, */ - /* so don't let usb_unlink_urb() release it */ - urb->bandwidth = 0; - break; - } + usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb); spin_unlock(&uhci->lock); - usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs); + usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status); spin_lock(&uhci->lock); /* If the queue is now empty, we can unlink the QH and give up its * reserved bandwidth. */ if (list_empty(&qh->queue)) { uhci_unlink_qh(uhci, qh); - - /* Bandwidth stuff not yet implemented */ + if (qh->bandwidth_reserved) + uhci_release_bandwidth(uhci, qh); } } @@ -1262,8 +1573,7 @@ __acquires(uhci->lock) (qh->state == QH_STATE_UNLINKING && \ uhci->frame_number + uhci->is_stopped != qh->unlink_frame) -static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh, - struct pt_regs *regs) +static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct urb_priv *urbp; struct urb *urb; @@ -1273,50 +1583,48 @@ static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh, urbp = list_entry(qh->queue.next, struct urb_priv, node); urb = urbp->urb; - switch (usb_pipetype(urb->pipe)) { - case PIPE_CONTROL: - status = uhci_result_control(uhci, urb); - break; - case PIPE_ISOCHRONOUS: + if (qh->type == USB_ENDPOINT_XFER_ISOC) status = uhci_result_isochronous(uhci, urb); - break; - default: /* PIPE_BULK or PIPE_INTERRUPT */ + else status = uhci_result_common(uhci, urb); - break; - } if (status == -EINPROGRESS) break; - spin_lock(&urb->lock); - if (urb->status == -EINPROGRESS) /* Not dequeued */ - urb->status = status; - else - status = -ECONNRESET; - spin_unlock(&urb->lock); - /* Dequeued but completed URBs can't be given back unless * the QH is stopped or has finished unlinking. */ - if (status == -ECONNRESET && - !(qh->is_stopped || QH_FINISHED_UNLINKING(qh))) - return; + if (urb->unlinked) { + if (QH_FINISHED_UNLINKING(qh)) + qh->is_stopped = 1; + else if (!qh->is_stopped) + return; + } - uhci_giveback_urb(uhci, qh, urb, regs); - if (qh->is_stopped) + uhci_giveback_urb(uhci, qh, urb, status); + if (status < 0) break; } /* If the QH is neither stopped nor finished unlinking (normal case), * our work here is done. */ - restart: - if (!(qh->is_stopped || QH_FINISHED_UNLINKING(qh))) + if (QH_FINISHED_UNLINKING(qh)) + qh->is_stopped = 1; + else if (!qh->is_stopped) return; /* Otherwise give back each of the dequeued URBs */ +restart: list_for_each_entry(urbp, &qh->queue, node) { urb = urbp->urb; - if (urb->status != -EINPROGRESS) { - uhci_save_toggle(qh, urb); - uhci_giveback_urb(uhci, qh, urb, regs); + if (urb->unlinked) { + + /* Fix up the TD links and save the toggles for + * non-Isochronous queues. For Isochronous queues, + * test for too-recent dequeues. */ + if (!uhci_cleanup_queue(uhci, qh, urb)) { + qh->is_stopped = 0; + return; + } + uhci_giveback_urb(uhci, qh, urb, 0); goto restart; } } @@ -1326,7 +1634,19 @@ static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh, * queue, the QH can now be re-activated. */ if (!list_empty(&qh->queue)) { if (qh->needs_fixup) - uhci_fixup_toggles(qh, 0); + uhci_fixup_toggles(uhci, qh, 0); + + /* If the first URB on the queue wants FSBR but its time + * limit has expired, set the next TD to interrupt on + * completion before reactivating the QH. 
*/ + urbp = list_entry(qh->queue.next, struct urb_priv, node); + if (urbp->fsbr && qh->wait_expired) { + struct uhci_td *td = list_entry(urbp->td_list.next, + struct uhci_td, list); + + td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); + } + uhci_activate_qh(uhci, qh); } @@ -1336,21 +1656,90 @@ static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh, uhci_make_qh_idle(uhci, qh); } -static void uhci_free_pending_tds(struct uhci_hcd *uhci) +/* + * Check for queues that have made some forward progress. + * Returns 0 if the queue is not Isochronous, is ACTIVE, and + * has not advanced since last examined; 1 otherwise. + * + * Early Intel controllers have a bug which causes qh->element sometimes + * not to advance when a TD completes successfully. The queue remains + * stuck on the inactive completed TD. We detect such cases and advance + * the element pointer by hand. + */ +static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) { - struct uhci_td *td, *tmp; + struct urb_priv *urbp = NULL; + struct uhci_td *td; + int ret = 1; + unsigned status; - list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) { - list_del_init(&td->remove_list); + if (qh->type == USB_ENDPOINT_XFER_ISOC) + goto done; - uhci_free_td(uhci, td); + /* Treat an UNLINKING queue as though it hasn't advanced. + * This is okay because reactivation will treat it as though + * it has advanced, and if it is going to become IDLE then + * this doesn't matter anyway. Furthermore it's possible + * for an UNLINKING queue not to have any URBs at all, or + * for its first URB not to have any TDs (if it was dequeued + * just as it completed). So it's not easy in any case to + * test whether such queues have advanced. */ + if (qh->state != QH_STATE_ACTIVE) { + urbp = NULL; + status = 0; + + } else { + urbp = list_entry(qh->queue.next, struct urb_priv, node); + td = list_entry(urbp->td_list.next, struct uhci_td, list); + status = td_status(uhci, td); + if (!(status & TD_CTRL_ACTIVE)) { + + /* We're okay, the queue has advanced */ + qh->wait_expired = 0; + qh->advance_jiffies = jiffies; + goto done; + } + ret = uhci->is_stopped; + } + + /* The queue hasn't advanced; check for timeout */ + if (qh->wait_expired) + goto done; + + if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) { + + /* Detect the Intel bug and work around it */ + if (qh->post_td && qh_element(qh) == + LINK_TO_TD(uhci, qh->post_td)) { + qh->element = qh->post_td->link; + qh->advance_jiffies = jiffies; + ret = 1; + goto done; + } + + qh->wait_expired = 1; + + /* If the current URB wants FSBR, unlink it temporarily + * so that we can safely set the next TD to interrupt on + * completion. That way we'll know as soon as the queue + * starts moving again. 
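/*
 * [Editor's illustration -- not part of the patch.]  The Intel
 * workaround described above, with a hypothetical QH type: if the
 * hardware element pointer still addresses the TD that software
 * already retired (qh->post_td), the controller failed to advance past
 * an inactive TD, and software nudges the pointer to that TD's
 * successor by hand.
 */
struct stuck_qh_sketch {
	unsigned int element;		/* hardware element pointer */
};

static int advance_by_hand(struct stuck_qh_sketch *qh,
			   unsigned int post_td_addr,
			   unsigned int post_td_link)
{
	if (qh->element != post_td_addr)
		return 0;		/* queue is really moving */
	qh->element = post_td_link;	/* skip the completed TD */
	return 1;
}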
*/
+		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
+			uhci_unlink_qh(uhci, qh);
+
+	} else {
+		/* Unmoving but not-yet-expired queues keep FSBR alive */
+		if (urbp)
+			uhci_urbp_wants_fsbr(uhci, urbp);
+	}
+
+done:
+	return ret;
+}
 
 /*
  * Process events in the schedule, but only in one thread at a time
  */
-static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
+static void uhci_scan_schedule(struct uhci_hcd *uhci)
 {
 	int i;
 	struct uhci_qh *qh;
@@ -1361,14 +1750,13 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 		return;
 	}
 	uhci->scan_in_progress = 1;
- rescan:
+rescan:
 	uhci->need_rescan = 0;
+	uhci->fsbr_is_wanted = 0;
 
 	uhci_clear_next_interrupt(uhci);
 	uhci_get_current_frame_number(uhci);
-
-	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
-		uhci_free_pending_tds(uhci);
+	uhci->cur_iso_frame = uhci->frame_number;
 
 	/* Go through all the QH queues and process the URBs in each one */
 	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
@@ -1377,33 +1765,30 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
 			uhci->next_qh = list_entry(qh->node.next,
 					struct uhci_qh, node);
-			uhci_scan_qh(uhci, qh, regs);
+
+			if (uhci_advance_check(uhci, qh)) {
+				uhci_scan_qh(uhci, qh);
+				if (qh->state == QH_STATE_ACTIVE) {
+					uhci_urbp_wants_fsbr(uhci,
+	list_entry(qh->queue.next, struct urb_priv, node));
+				}
+			}
 		}
 	}
 
+	uhci->last_iso_frame = uhci->cur_iso_frame;
 	if (uhci->need_rescan)
 		goto rescan;
 	uhci->scan_in_progress = 0;
 
-	/* If the controller is stopped, we can finish these off right now */
-	if (uhci->is_stopped)
-		uhci_free_pending_tds(uhci);
+	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
+			!uhci->fsbr_expiring) {
+		uhci->fsbr_expiring = 1;
+		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
+	}
 
-	if (list_empty(&uhci->td_remove_list) &&
-	    list_empty(&uhci->skel_unlink_qh->node))
+	if (list_empty(&uhci->skel_unlink_qh->node))
 		uhci_clear_next_interrupt(uhci);
 	else
 		uhci_set_next_interrupt(uhci);
 }
-
-static void check_fsbr(struct uhci_hcd *uhci)
-{
-	/* For now, don't scan URBs for FSBR timeouts.
-	 * Add it back in later... */
-
-	/* Really disable FSBR */
-	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
-		uhci->fsbrtimeout = 0;
-		uhci->skel_term_qh->link = UHCI_PTR_TERM;
-	}
-}
