Diffstat (limited to 'drivers/usb/host/uhci-q.c')
 -rw-r--r--  drivers/usb/host/uhci-q.c | 755
 1 file changed, 504 insertions(+), 251 deletions(-)
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 30b88459ac7..da6f56d996c 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -13,7 +13,7 @@
* (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
* support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
* (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
- * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
*/
@@ -29,12 +29,12 @@ static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
if (uhci->is_stopped)
mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
- uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
+ uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
- uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
+ uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
}
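
Throughout the patch, cpu_to_le32() becomes cpu_to_hc32(uhci, ...): descriptor endianness turns into a per-controller property instead of a compile-time assumption, the same pattern EHCI uses. A minimal sketch of what such a helper can look like, assuming a big_endian_desc flag on the controller (illustrative, not necessarily the exact uhci-hcd.h definition):

	typedef __u32 __bitwise __hc32;

	static inline __hc32 my_cpu_to_hc32(const struct uhci_hcd *uhci, u32 x)
	{
		/* Pick the descriptor byte order this controller expects;
		 * big_endian_desc is an assumed per-controller flag. */
		return uhci->big_endian_desc ? (__force __hc32)cpu_to_be32(x)
					     : (__force __hc32)cpu_to_le32(x);
	}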
@@ -45,15 +45,27 @@ static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
*/
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
+ struct uhci_qh *lqh;
+
+ /* The terminating skeleton QH always points back to the first
+ * FSBR QH. Make the last async QH point to the terminating
+ * skeleton QH. */
uhci->fsbr_is_on = 1;
- uhci->skel_term_qh->link = cpu_to_le32(
- uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
+ lqh = list_entry(uhci->skel_async_qh->node.prev,
+ struct uhci_qh, node);
+ lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
}
static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
+ struct uhci_qh *lqh;
+
+ /* Remove the link from the last async QH to the terminating
+ * skeleton QH. */
uhci->fsbr_is_on = 0;
- uhci->skel_term_qh->link = UHCI_PTR_TERM;
+ lqh = list_entry(uhci->skel_async_qh->node.prev,
+ struct uhci_qh, node);
+ lqh->link = UHCI_PTR_TERM(uhci);
}
static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
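
uhci_fsbr_on() and uhci_fsbr_off() above pick out the last async QH through the list head's prev pointer, the standard tail idiom for the kernel's circular struct list_head. Distilled, with a hypothetical struct that is not from this file:

	struct item {
		int value;
		struct list_head node;
	};

	/* The tail of a circular doubly linked list is head->prev. */
	static struct item *last_item(struct list_head *head)
	{
		return list_entry(head->prev, struct item, node);
	}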
@@ -112,19 +124,19 @@ static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
if (!list_empty(&td->list))
- dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
+ dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
if (!list_empty(&td->fl_list))
- dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
+ dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);
dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
-static inline void uhci_fill_td(struct uhci_td *td, u32 status,
- u32 token, u32 buffer)
+static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
+ u32 status, u32 token, u32 buffer)
{
- td->status = cpu_to_le32(status);
- td->token = cpu_to_le32(token);
- td->buffer = cpu_to_le32(buffer);
+ td->status = cpu_to_hc32(uhci, status);
+ td->token = cpu_to_hc32(uhci, token);
+ td->buffer = cpu_to_hc32(uhci, buffer);
}
static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
@@ -158,11 +170,11 @@ static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
td->link = ltd->link;
wmb();
- ltd->link = cpu_to_le32(td->dma_handle);
+ ltd->link = LINK_TO_TD(uhci, td);
} else {
td->link = uhci->frame[framenum];
wmb();
- uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
+ uhci->frame[framenum] = LINK_TO_TD(uhci, td);
uhci->frame_cpu[framenum] = td;
}
}
@@ -183,8 +195,10 @@ static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
} else {
struct uhci_td *ntd;
- ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
- uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
+ ntd = list_entry(td->fl_list.next,
+ struct uhci_td,
+ fl_list);
+ uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
uhci->frame_cpu[td->frame] = ntd;
}
} else {
@@ -241,23 +255,33 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
memset(qh, 0, sizeof(*qh));
qh->dma_handle = dma_handle;
- qh->element = UHCI_PTR_TERM;
- qh->link = UHCI_PTR_TERM;
+ qh->element = UHCI_PTR_TERM(uhci);
+ qh->link = UHCI_PTR_TERM(uhci);
INIT_LIST_HEAD(&qh->queue);
INIT_LIST_HEAD(&qh->node);
if (udev) { /* Normal QH */
- qh->dummy_td = uhci_alloc_td(uhci);
- if (!qh->dummy_td) {
- dma_pool_free(uhci->qh_pool, qh, dma_handle);
- return NULL;
+ qh->type = usb_endpoint_type(&hep->desc);
+ if (qh->type != USB_ENDPOINT_XFER_ISOC) {
+ qh->dummy_td = uhci_alloc_td(uhci);
+ if (!qh->dummy_td) {
+ dma_pool_free(uhci->qh_pool, qh, dma_handle);
+ return NULL;
+ }
}
qh->state = QH_STATE_IDLE;
qh->hep = hep;
qh->udev = udev;
hep->hcpriv = qh;
- qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ if (qh->type == USB_ENDPOINT_XFER_INT ||
+ qh->type == USB_ENDPOINT_XFER_ISOC)
+ qh->load = usb_calc_bus_time(udev->speed,
+ usb_endpoint_dir_in(&hep->desc),
+ qh->type == USB_ENDPOINT_XFER_ISOC,
+ usb_endpoint_maxp(&hep->desc))
+ / 1000 + 1;
} else { /* Skeleton QH */
qh->state = QH_STATE_ACTIVE;
@@ -270,12 +294,13 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
if (!list_empty(&qh->queue))
- dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
+ dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);
list_del(&qh->node);
if (qh->udev) {
qh->hep->hcpriv = NULL;
- uhci_free_td(uhci, qh->dummy_td);
+ if (qh->dummy_td)
+ uhci_free_td(uhci, qh->dummy_td);
}
dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
@@ -323,11 +348,11 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
/* If the QH element pointer is UHCI_PTR_TERM then the currently
* executing URB has already been unlinked, so this one isn't it. */
- if (qh_element(qh) == UHCI_PTR_TERM)
+ if (qh_element(qh) == UHCI_PTR_TERM(uhci))
goto done;
- qh->element = UHCI_PTR_TERM;
+ qh->element = UHCI_PTR_TERM(uhci);
- /* Control pipes have to worry about toggles */
+ /* Control pipes don't have to worry about toggles */
if (qh->type == USB_ENDPOINT_XFER_CONTROL)
goto done;
@@ -335,7 +360,7 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
WARN_ON(list_empty(&urbp->td_list));
td = list_entry(urbp->td_list.next, struct uhci_td, list);
qh->needs_fixup = 1;
- qh->initial_toggle = uhci_toggle(td_token(td));
+ qh->initial_toggle = uhci_toggle(td_token(uhci, td));
done:
return ret;
@@ -345,7 +370,8 @@ done:
* Fix up the data toggles for URBs in a queue, when one of them
* terminates early (short transfer, error, or dequeued).
*/
-static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
+static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
+ int skip_first)
{
struct urb_priv *urbp = NULL;
struct uhci_td *td;
@@ -359,7 +385,7 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
/* When starting with the first URB, if the QH element pointer is
* still valid then we know the URB's toggles are okay. */
- else if (qh_element(qh) != UHCI_PTR_TERM)
+ else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
toggle = 2;
/* Fix up the toggle for the URBs in the queue. Normally this
@@ -371,15 +397,15 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
/* If the first TD has the right toggle value, we don't
* need to change any toggles in this URB */
td = list_entry(urbp->td_list.next, struct uhci_td, list);
- if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
+ if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
td = list_entry(urbp->td_list.prev, struct uhci_td,
list);
- toggle = uhci_toggle(td_token(td)) ^ 1;
+ toggle = uhci_toggle(td_token(uhci, td)) ^ 1;
/* Otherwise all the toggles in the URB have to be switched */
} else {
list_for_each_entry(td, &urbp->td_list, list) {
- td->token ^= __constant_cpu_to_le32(
+ td->token ^= cpu_to_hc32(uhci,
TD_TOKEN_TOGGLE);
toggle ^= 1;
}
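
Flipping every TD with an XOR works because DATA0/DATA1 is a single bit in the token; XOR-ing in the mask inverts it while leaving the rest of the token untouched. Roughly (TD_TOKEN_TOGGLE_SHIFT is 19 in uhci-hcd.h; shown here only for illustration):

	#define TD_TOKEN_TOGGLE_SHIFT	19
	#define TD_TOKEN_TOGGLE		(1 << TD_TOKEN_TOGGLE_SHIFT)

	/* Invert a token's DATA0/DATA1 bit; all other fields pass through. */
	static inline u32 flip_toggle(u32 token)
	{
		return token ^ TD_TOKEN_TOGGLE;
	}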
@@ -394,23 +420,77 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
}
/*
- * Put a QH on the schedule in both hardware and software
+ * Link an Isochronous QH into its skeleton's list
*/
-static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
+
+ /* Isochronous QHs aren't linked by the hardware */
+}
+
+/*
+ * Link a high-period interrupt QH into the schedule at the end of its
+ * skeleton's list
+ */
+static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ struct uhci_qh *pqh;
+
+ list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
+
+ pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+ qh->link = pqh->link;
+ wmb();
+ pqh->link = LINK_TO_QH(uhci, qh);
+}
+
+/*
+ * Link a period-1 interrupt or async QH into the schedule at the
+ * correct spot in the async skeleton's list, and update the FSBR link
+ */
+static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct uhci_qh *pqh;
+ __hc32 link_to_new_qh;
+
+ /* Find the predecessor QH for our new one and insert it in the list.
+ * The list of QHs is expected to be short, so linear search won't
+ * take too long. */
+ list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
+ if (pqh->skel <= qh->skel)
+ break;
+ }
+ list_add(&qh->node, &pqh->node);
+ /* Link it into the schedule */
+ qh->link = pqh->link;
+ wmb();
+ link_to_new_qh = LINK_TO_QH(uhci, qh);
+ pqh->link = link_to_new_qh;
+
+ /* If this is now the first FSBR QH, link the terminating skeleton
+ * QH to it. */
+ if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
+ uhci->skel_term_qh->link = link_to_new_qh;
+}
+
+/*
+ * Put a QH on the schedule in both hardware and software
+ */
+static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
WARN_ON(list_empty(&qh->queue));
/* Set the element pointer if it isn't set already.
* This isn't needed for Isochronous queues, but it doesn't hurt. */
- if (qh_element(qh) == UHCI_PTR_TERM) {
+ if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
struct urb_priv *urbp = list_entry(qh->queue.next,
struct urb_priv, node);
struct uhci_td *td = list_entry(urbp->td_list.next,
struct uhci_td, list);
- qh->element = cpu_to_le32(td->dma_handle);
+ qh->element = LINK_TO_TD(uhci, td);
}
/* Treat the queue as if it has just advanced */
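
Note the write ordering in link_interrupt() and link_async() above: the new QH's link pointer must reach memory (hence the wmb()) before the predecessor's link publishes the QH to the controller, or the hardware could chase a stale pointer. The pattern in isolation, with a hypothetical descriptor type:

	struct desc {
		__hc32 link;
		/* ... other hardware-visible fields ... */
	};

	/* Splice @new after @prev in a chain the controller walks
	 * concurrently with the CPU. */
	static void hw_splice(struct desc *prev, struct desc *new,
			__hc32 link_to_new)
	{
		new->link = prev->link;		/* inherit prev's successor */
		wmb();				/* flush before publishing */
		prev->link = link_to_new;	/* hardware can now see @new */
	}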
@@ -421,42 +501,74 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
return;
qh->state = QH_STATE_ACTIVE;
- /* Move the QH from its old list to the end of the appropriate
+ /* Move the QH from its old list to the correct spot in the appropriate
* skeleton's list */
if (qh == uhci->next_qh)
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
node);
- list_move_tail(&qh->node, &qh->skel->node);
+ list_del(&qh->node);
+
+ if (qh->skel == SKEL_ISO)
+ link_iso(uhci, qh);
+ else if (qh->skel < SKEL_ASYNC)
+ link_interrupt(uhci, qh);
+ else
+ link_async(uhci, qh);
+}
+
+/*
+ * Unlink a high-period interrupt QH from the schedule
+ */
+static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ struct uhci_qh *pqh;
- /* Link it into the schedule */
pqh = list_entry(qh->node.prev, struct uhci_qh, node);
- qh->link = pqh->link;
- wmb();
- pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
+ pqh->link = qh->link;
+ mb();
}
/*
- * Take a QH off the hardware schedule
+ * Unlink a period-1 interrupt or async QH from the schedule
*/
-static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
struct uhci_qh *pqh;
+ __hc32 link_to_next_qh = qh->link;
+
+ pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+ pqh->link = link_to_next_qh;
+
+ /* If this was the old first FSBR QH, link the terminating skeleton
+ * QH to the next (new first FSBR) QH. */
+ if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
+ uhci->skel_term_qh->link = link_to_next_qh;
+ mb();
+}
+/*
+ * Take a QH off the hardware schedule
+ */
+static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
if (qh->state == QH_STATE_UNLINKING)
return;
WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
qh->state = QH_STATE_UNLINKING;
/* Unlink the QH from the schedule and record when we did it */
- pqh = list_entry(qh->node.prev, struct uhci_qh, node);
- pqh->link = qh->link;
- mb();
+ if (qh->skel == SKEL_ISO)
+ ;
+ else if (qh->skel < SKEL_ASYNC)
+ unlink_interrupt(uhci, qh);
+ else
+ unlink_async(uhci, qh);
uhci_get_current_frame_number(uhci);
qh->unlink_frame = uhci->frame_number;
/* Force an interrupt so we know when the QH is fully unlinked */
- if (list_empty(&uhci->skel_unlink_qh->node))
+ if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
uhci_set_next_interrupt(uhci);
/* Move the QH from its old list to the end of the unlinking list */
@@ -493,20 +605,133 @@ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
wake_up_all(&uhci->waitqh);
}
+/*
+ * Find the highest existing bandwidth load for a given phase and period.
+ */
+static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
+{
+ int highest_load = uhci->load[phase];
+
+ for (phase += period; phase < MAX_PHASE; phase += period)
+ highest_load = max_t(int, highest_load, uhci->load[phase]);
+ return highest_load;
+}
+
+/*
+ * Set qh->phase to the optimal phase for a periodic transfer and
+ * check whether the bandwidth requirement is acceptable.
+ */
+static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ int minimax_load;
+
+ /* Find the optimal phase (unless it is already set) and get
+ * its load value. */
+ if (qh->phase >= 0)
+ minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+ else {
+ int phase, load;
+ int max_phase = min_t(int, MAX_PHASE, qh->period);
+
+ qh->phase = 0;
+ minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+ for (phase = 1; phase < max_phase; ++phase) {
+ load = uhci_highest_load(uhci, phase, qh->period);
+ if (load < minimax_load) {
+ minimax_load = load;
+ qh->phase = phase;
+ }
+ }
+ }
+
+ /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
+ if (minimax_load + qh->load > 900) {
+ dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
+ "period %d, phase %d, %d + %d us\n",
+ qh->period, qh->phase, minimax_load, qh->load);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
+/*
+ * Reserve a periodic QH's bandwidth in the schedule
+ */
+static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ int i;
+ int load = qh->load;
+ char *p = "??";
+
+ for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+ uhci->load[i] += load;
+ uhci->total_load += load;
+ }
+ uhci_to_hcd(uhci)->self.bandwidth_allocated =
+ uhci->total_load / MAX_PHASE;
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_INT:
+ ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+ p = "INT";
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+ p = "ISO";
+ break;
+ }
+ qh->bandwidth_reserved = 1;
+ dev_dbg(uhci_dev(uhci),
+ "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+ "reserve", qh->udev->devnum,
+ qh->hep->desc.bEndpointAddress, p,
+ qh->period, qh->phase, load);
+}
+
+/*
+ * Release a periodic QH's bandwidth reservation
+ */
+static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+ int i;
+ int load = qh->load;
+ char *p = "??";
+
+ for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+ uhci->load[i] -= load;
+ uhci->total_load -= load;
+ }
+ uhci_to_hcd(uhci)->self.bandwidth_allocated =
+ uhci->total_load / MAX_PHASE;
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_INT:
+ --uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+ p = "INT";
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+ p = "ISO";
+ break;
+ }
+ qh->bandwidth_reserved = 0;
+ dev_dbg(uhci_dev(uhci),
+ "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+ "release", qh->udev->devnum,
+ qh->hep->desc.bEndpointAddress, p,
+ qh->period, qh->phase, load);
+}
+
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
struct urb *urb)
{
struct urb_priv *urbp;
- urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC);
+ urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
if (!urbp)
return NULL;
- memset((void *)urbp, 0, sizeof(*urbp));
-
urbp->urb = urb;
urb->hcpriv = urbp;
-
+
INIT_LIST_HEAD(&urbp->node);
INIT_LIST_HEAD(&urbp->td_list);
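
The phase selection added in uhci_check_bandwidth() above is a plain minimax search: for each candidate phase, take the worst-case (highest) load over the frames that phase touches, then keep the phase whose worst case is smallest. A self-contained userspace sketch of the same computation (names are illustrative, not kernel API):

	#include <stdio.h>

	#define MAX_PHASE 32	/* mirrors the driver's framelist phases */

	static int highest_load(const int load[], int phase, int period)
	{
		int worst = load[phase];

		for (phase += period; phase < MAX_PHASE; phase += period)
			if (load[phase] > worst)
				worst = load[phase];
		return worst;
	}

	/* Return the phase in [0, min(period, MAX_PHASE)) whose
	 * worst-case per-frame load is lowest. */
	static int best_phase(const int load[], int period)
	{
		int max_phase = period < MAX_PHASE ? period : MAX_PHASE;
		int best = 0, minimax = highest_load(load, 0, period);

		for (int phase = 1; phase < max_phase; ++phase) {
			int w = highest_load(load, phase, period);

			if (w < minimax) {
				minimax = w;
				best = phase;
			}
		}
		return best;
	}

	int main(void)
	{
		int load[MAX_PHASE] = { 300, 100, 500, 100 };	/* us per frame */

		printf("best phase for period 4: %d\n", best_phase(load, 4));
		return 0;
	}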
@@ -519,7 +744,7 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
struct uhci_td *td, *tmp;
if (!list_empty(&urbp->node))
- dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
+ dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
urbp->urb);
list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
@@ -527,15 +752,14 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
uhci_free_td(uhci, td);
}
- urbp->urb->hcpriv = NULL;
kmem_cache_free(uhci_up_cachep, urbp);
}
/*
* Map status to standard result codes
*
- * <status> is (td_status(td) & 0xF60000), a.k.a.
- * uhci_status_bits(td_status(td)).
+ * <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
+ * uhci_status_bits(td_status(uhci, td)).
* Note: <status> does not include the TD_CTRL_NAK bit.
* <dir_out> is True for output TDs and False for input TDs.
*/
@@ -568,11 +792,12 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
{
struct uhci_td *td;
unsigned long destination, status;
- int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
+ int maxsze = usb_endpoint_maxp(&qh->hep->desc);
int len = urb->transfer_buffer_length;
dma_addr_t data = urb->transfer_dma;
- __le32 *plink;
+ __hc32 *plink;
struct urb_priv *urbp = urb->hcpriv;
+ int skel;
/* The "pipe" thing contains the destination in bits 8--18 */
destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
@@ -587,7 +812,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
*/
td = qh->dummy_td;
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status, destination | uhci_explen(8),
+ uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
urb->setup_dma);
plink = &td->link;
status |= TD_CTRL_ACTIVE;
@@ -596,8 +821,10 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
* If direction is "send", change the packet ID from SETUP (0x2D)
* to OUT (0xE1). Else change it from SETUP to IN (0x69) and
* set Short Packet Detect (SPD) for all data packets.
+ *
+ * 0-length transfers always get treated as "send".
*/
- if (usb_pipeout(urb->pipe))
+ if (usb_pipeout(urb->pipe) || len == 0)
destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
else {
destination ^= (USB_PID_SETUP ^ USB_PID_IN);
@@ -608,19 +835,24 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
* Build the DATA TDs
*/
while (len > 0) {
- int pktsze = min(len, maxsze);
+ int pktsze = maxsze;
+
+ if (len <= pktsze) { /* The last data packet */
+ pktsze = len;
+ status &= ~TD_CTRL_SPD;
+ }
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = cpu_to_le32(td->dma_handle);
+ *plink = LINK_TO_TD(uhci, td);
/* Alternate Data0/1 (start with Data1) */
destination ^= TD_TOKEN_TOGGLE;
-
+
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status, destination | uhci_explen(pktsze),
- data);
+ uhci_fill_td(uhci, td, status,
+ destination | uhci_explen(pktsze), data);
plink = &td->link;
data += pktsze;
@@ -628,29 +860,19 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
}
/*
- * Build the final TD for control status
+ * Build the final TD for control status
*/
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = cpu_to_le32(td->dma_handle);
-
- /*
- * It's IN if the pipe is an output pipe or we're not expecting
- * data back.
- */
- destination &= ~TD_TOKEN_PID_MASK;
- if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
- destination |= USB_PID_IN;
- else
- destination |= USB_PID_OUT;
+ *plink = LINK_TO_TD(uhci, td);
+ /* Change direction for the status transaction */
+ destination ^= (USB_PID_IN ^ USB_PID_OUT);
destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
- status &= ~TD_CTRL_SPD;
-
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status | TD_CTRL_IOC,
+ uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
destination | uhci_explen(0), 0);
plink = &td->link;
@@ -660,11 +882,11 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = cpu_to_le32(td->dma_handle);
+ *plink = LINK_TO_TD(uhci, td);
- uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
+ uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
wmb();
- qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
+ qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
qh->dummy_td = td;
/* Low-speed transfers get a different queue, and won't hog the bus.
@@ -673,13 +895,13 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
* isn't in the CONFIGURED state. */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->state != USB_STATE_CONFIGURED)
- qh->skel = uhci->skel_ls_control_qh;
+ skel = SKEL_LS_CONTROL;
else {
- qh->skel = uhci->skel_fs_control_qh;
+ skel = SKEL_FS_CONTROL;
uhci_add_fsbr(uhci, urb);
}
-
- urb->actual_length = -8; /* Account for the SETUP packet */
+ if (qh->state != QH_STATE_ACTIVE)
+ qh->skel = skel;
return 0;
nomem:
@@ -696,12 +918,15 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
{
struct uhci_td *td;
unsigned long destination, status;
- int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
+ int maxsze = usb_endpoint_maxp(&qh->hep->desc);
int len = urb->transfer_buffer_length;
- dma_addr_t data = urb->transfer_dma;
- __le32 *plink;
+ int this_sg_len;
+ dma_addr_t data;
+ __hc32 *plink;
struct urb_priv *urbp = urb->hcpriv;
unsigned int toggle;
+ struct scatterlist *sg;
+ int i;
if (len < 0)
return -EINVAL;
@@ -718,12 +943,26 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
if (usb_pipein(urb->pipe))
status |= TD_CTRL_SPD;
+ i = urb->num_mapped_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg;
+ data = sg_dma_address(sg);
+
+ /* urb->transfer_buffer_length may be smaller than the
+ * size of the scatterlist (or vice versa)
+ */
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ } else {
+ sg = NULL;
+ data = urb->transfer_dma;
+ this_sg_len = len;
+ }
/*
* Build the DATA TDs
*/
plink = NULL;
td = qh->dummy_td;
- do { /* Allow zero length packets */
+ for (;;) { /* Allow zero length packets */
int pktsze = maxsze;
if (len <= pktsze) { /* The last packet */
@@ -736,20 +975,28 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = cpu_to_le32(td->dma_handle);
+ *plink = LINK_TO_TD(uhci, td);
}
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status,
+ uhci_fill_td(uhci, td, status,
destination | uhci_explen(pktsze) |
(toggle << TD_TOKEN_TOGGLE_SHIFT),
data);
plink = &td->link;
status |= TD_CTRL_ACTIVE;
+ toggle ^= 1;
data += pktsze;
+ this_sg_len -= pktsze;
len -= maxsze;
- toggle ^= 1;
- } while (len > 0);
+ if (this_sg_len <= 0) {
+ if (--i <= 0 || len <= 0)
+ break;
+ sg = sg_next(sg);
+ data = sg_dma_address(sg);
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ }
+ }
/*
* URB_ZERO_PACKET means adding a 0-length packet, if direction
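
The scatter-gather walk added to uhci_submit_common() interleaves two loops in one: chop the current DMA segment into maxsze-sized packets, and hop to the next segment once this_sg_len is exhausted while the URB still has bytes left. The same shape in a standalone form, with a hypothetical segment type and no DMA mapping (assumes at least one segment):

	struct seg {
		const char *buf;
		int len;
	};

	/* Emit packet-sized chunks across an array of segments, capping
	 * each chunk at maxsze; "emit" stands in for building a TD. */
	static void chunk_segments(const struct seg *sg, int nsegs, int len,
			int maxsze, void (*emit)(const char *data, int pktsze))
	{
		const char *data = sg->buf;
		int this_sg_len = sg->len < len ? sg->len : len;

		for (;;) {	/* allow zero-length packets */
			int pktsze = len < maxsze ? len : maxsze;

			emit(data, pktsze);
			data += pktsze;
			this_sg_len -= pktsze;
			len -= maxsze;	/* may go negative on the last packet */
			if (this_sg_len <= 0) {
				if (--nsegs <= 0 || len <= 0)
					break;
				++sg;
				data = sg->buf;
				this_sg_len = sg->len < len ? sg->len : len;
			}
		}
	}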
@@ -764,10 +1011,10 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = cpu_to_le32(td->dma_handle);
+ *plink = LINK_TO_TD(uhci, td);
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status,
+ uhci_fill_td(uhci, td, status,
destination | uhci_explen(0) |
(toggle << TD_TOKEN_TOGGLE_SHIFT),
data);
@@ -782,7 +1029,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
* fast side but not enough to justify delaying an interrupt
* more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
* flag setting. */
- td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
+ td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
/*
* Build the new dummy TD and activate the old one
@@ -790,13 +1037,12 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
td = uhci_alloc_td(uhci);
if (!td)
goto nomem;
- *plink = cpu_to_le32(td->dma_handle);
+ *plink = LINK_TO_TD(uhci, td);
- uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
+ uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
wmb();
- qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
+ qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
qh->dummy_td = td;
- qh->period = urb->interval;
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), toggle);
@@ -808,7 +1054,7 @@ nomem:
return -ENOMEM;
}
-static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
+static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
int ret;
@@ -817,7 +1063,8 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
if (urb->dev->speed == USB_SPEED_LOW)
return -EINVAL;
- qh->skel = uhci->skel_bulk_qh;
+ if (qh->state != QH_STATE_ACTIVE)
+ qh->skel = SKEL_BULK;
ret = uhci_submit_common(uhci, urb, qh);
if (ret == 0)
uhci_add_fsbr(uhci, urb);
@@ -827,28 +1074,47 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
- int exponent;
+ int ret;
/* USB 1.1 interrupt transfers only involve one packet per interval.
* Drivers can submit URBs of any length, but longer ones will need
* multiple intervals to complete.
*/
- /* Figure out which power-of-two queue to use */
- for (exponent = 7; exponent >= 0; --exponent) {
- if ((1 << exponent) <= urb->interval)
- break;
- }
- if (exponent < 0)
- return -EINVAL;
- urb->interval = 1 << exponent;
+ if (!qh->bandwidth_reserved) {
+ int exponent;
- if (qh->period == 0)
- qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
- else if (qh->period != urb->interval)
- return -EINVAL; /* Can't change the period */
+ /* Figure out which power-of-two queue to use */
+ for (exponent = 7; exponent >= 0; --exponent) {
+ if ((1 << exponent) <= urb->interval)
+ break;
+ }
+ if (exponent < 0)
+ return -EINVAL;
- return uhci_submit_common(uhci, urb, qh);
+ /* If the slot is full, try a lower period */
+ do {
+ qh->period = 1 << exponent;
+ qh->skel = SKEL_INDEX(exponent);
+
+ /* For now, interrupt phase is fixed by the layout
+ * of the QH lists.
+ */
+ qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
+ ret = uhci_check_bandwidth(uhci, qh);
+ } while (ret != 0 && --exponent >= 0);
+ if (ret)
+ return ret;
+ } else if (qh->period > urb->interval)
+ return -EINVAL; /* Can't decrease the period */
+
+ ret = uhci_submit_common(uhci, urb, qh);
+ if (ret == 0) {
+ urb->interval = qh->period;
+ if (!qh->bandwidth_reserved)
+ uhci_reserve_bandwidth(uhci, qh);
+ }
+ return ret;
}
/*
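
The exponent loop in uhci_submit_interrupt() above simply rounds urb->interval down to a power of two between 1 and 128, the periods the skeleton QHs provide. Standalone (an illustrative helper, not kernel API):

	/* Round an interval down to the nearest power of two in [1, 128];
	 * returns -1 if interval < 1. */
	static int pow2_interval(int interval)
	{
		int exponent;

		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= interval)
				return 1 << exponent;
		}
		return -1;
	}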
@@ -868,7 +1134,7 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
* the queue at the status stage transaction, which is
* the last TD. */
WARN_ON(list_empty(&urbp->td_list));
- qh->element = cpu_to_le32(td->dma_handle);
+ qh->element = LINK_TO_TD(uhci, td);
tmp = td->list.prev;
ret = -EINPROGRESS;
@@ -877,8 +1143,9 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
/* When a bulk/interrupt transfer is short, we have to
* fix up the toggles of the following URBs on the queue
* before restarting the queue at the next URB. */
- qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
- uhci_fixup_toggles(qh, 1);
+ qh->initial_toggle =
+ uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
+ uhci_fixup_toggles(uhci, qh, 1);
if (list_empty(&urbp->td_list))
td = qh->post_td;
@@ -913,7 +1180,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
unsigned int ctrlstat;
int len;
- ctrlstat = td_status(td);
+ ctrlstat = td_status(uhci, td);
status = uhci_status_bits(ctrlstat);
if (status & TD_CTRL_ACTIVE)
return -EINPROGRESS;
@@ -923,25 +1190,33 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
if (status) {
ret = uhci_map_status(status,
- uhci_packetout(td_token(td)));
+ uhci_packetout(td_token(uhci, td)));
if ((debug == 1 && ret != -EPIPE) || debug > 1) {
/* Some debugging code */
dev_dbg(&urb->dev->dev,
"%s: failed with status %x\n",
- __FUNCTION__, status);
+ __func__, status);
if (debug > 1 && errbuf) {
/* Print the chain for debugging */
- uhci_show_qh(urbp->qh, errbuf,
- ERRBUF_LEN, 0);
+ uhci_show_qh(uhci, urbp->qh, errbuf,
+ ERRBUF_LEN - EXTRA_SPACE, 0);
lprintk(errbuf);
}
}
- } else if (len < uhci_expected_length(td_token(td))) {
+ /* Did we receive a short packet? */
+ } else if (len < uhci_expected_length(td_token(uhci, td))) {
+
+ /* For control transfers, go to the status TD if
+ * this isn't already the last data TD */
+ if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+ if (td->list.next != urbp->td_list.prev)
+ ret = 1;
+ }
- /* We received a short packet */
- if (urb->transfer_flags & URB_SHORT_NOT_OK)
+ /* For bulk and interrupt, this may be an error */
+ else if (urb->transfer_flags & URB_SHORT_NOT_OK)
ret = -EREMOTEIO;
/* Fixup needed only if this isn't the URB's last TD */
@@ -961,16 +1236,12 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
err:
if (ret < 0) {
- /* In case a control transfer gets an error
- * during the setup stage */
- urb->actual_length = max(urb->actual_length, 0);
-
/* Note that the queue has stopped and save
* the next toggle value */
- qh->element = UHCI_PTR_TERM;
+ qh->element = UHCI_PTR_TERM(uhci);
qh->is_stopped = 1;
qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
- qh->initial_toggle = uhci_toggle(td_token(td)) ^
+ qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
(ret == -EREMOTEIO);
} else /* Short packet received */
@@ -985,7 +1256,8 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */
- int i, frame;
+ int i;
+ unsigned frame, next;
unsigned long destination, status;
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
@@ -994,20 +1266,30 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
urb->number_of_packets >= UHCI_NUMFRAMES)
return -EFBIG;
+ uhci_get_current_frame_number(uhci);
+
/* Check the period and figure out the starting frame number */
- if (qh->period == 0) {
- if (urb->transfer_flags & URB_ISO_ASAP) {
- uhci_get_current_frame_number(uhci);
- urb->start_frame = uhci->frame_number + 10;
- } else {
- i = urb->start_frame - uhci->last_iso_frame;
- if (i <= 0 || i >= UHCI_NUMFRAMES)
- return -EINVAL;
- }
+ if (!qh->bandwidth_reserved) {
+ qh->period = urb->interval;
+ qh->phase = -1; /* Find the best phase */
+ i = uhci_check_bandwidth(uhci, qh);
+ if (i)
+ return i;
+
+ /* Allow a little time to allocate the TDs */
+ next = uhci->frame_number + 10;
+ frame = qh->phase;
+
+ /* Round up to the first available slot */
+ frame += (next - frame + qh->period - 1) & -qh->period;
+
} else if (qh->period != urb->interval) {
return -EINVAL; /* Can't change the period */
- } else { /* Pick up where the last URB leaves off */
+ } else {
+ next = uhci->frame_number + 1;
+
+ /* Find the next unused frame */
if (list_empty(&qh->queue)) {
frame = qh->iso_frame;
} else {
@@ -1019,17 +1301,35 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
lurb->number_of_packets *
lurb->interval;
}
- if (urb->transfer_flags & URB_ISO_ASAP)
- urb->start_frame = frame;
- else if (urb->start_frame != frame)
- return -EINVAL;
+
+ /* Fell behind? */
+ if (!uhci_frame_before_eq(next, frame)) {
+
+ /* USB_ISO_ASAP: Round up to the first available slot */
+ if (urb->transfer_flags & URB_ISO_ASAP)
+ frame += (next - frame + qh->period - 1) &
+ -qh->period;
+
+ /*
+ * Not ASAP: Use the next slot in the stream,
+ * no matter what.
+ */
+ else if (!uhci_frame_before_eq(next,
+ frame + (urb->number_of_packets - 1) *
+ qh->period))
+ dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+ urb, frame,
+ (urb->number_of_packets - 1) *
+ qh->period,
+ next);
+ }
}
/* Make sure we won't have to go too far into the future */
if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
- urb->start_frame + urb->number_of_packets *
- urb->interval))
+ frame + urb->number_of_packets * urb->interval))
return -EFBIG;
+ urb->start_frame = frame;
status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
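
The rounding expression used twice above, frame += (next - frame + qh->period - 1) & -qh->period, depends on qh->period being a power of two: masking with -period truncates to a multiple of the period, so adding period - 1 first rounds the distance up to the next slot. A small self-checking sketch (unsigned wraparound is intended, mirroring frame-number arithmetic):

	#include <assert.h>

	/* First slot >= next in a stream anchored at frame with a
	 * power-of-two period. */
	static unsigned int first_slot(unsigned int frame, unsigned int next,
			unsigned int period)
	{
		return frame + ((next - frame + period - 1) & -period);
	}

	int main(void)
	{
		assert(first_slot(2, 10, 4) == 10);	/* 10 is already a slot */
		assert(first_slot(2, 11, 4) == 14);	/* round 11 up to 14 */
		assert(first_slot(0, 7, 8) == 8);
		return 0;
	}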
@@ -1040,17 +1340,14 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
return -ENOMEM;
uhci_add_td_to_urbp(td, urbp);
- uhci_fill_td(td, status, destination |
+ uhci_fill_td(uhci, td, status, destination |
uhci_explen(urb->iso_frame_desc[i].length),
urb->transfer_dma +
urb->iso_frame_desc[i].offset);
}
/* Set the interrupt-on-completion flag on the last packet. */
- td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
-
- qh->skel = uhci->skel_iso_qh;
- qh->period = urb->interval;
+ td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
/* Add the TDs to the frame list */
frame = urb->start_frame;
@@ -1062,9 +1359,11 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
if (list_empty(&qh->queue)) {
qh->iso_packet_desc = &urb->iso_frame_desc[0];
qh->iso_frame = urb->start_frame;
- qh->iso_status = 0;
}
+ qh->skel = SKEL_ISO;
+ if (!qh->bandwidth_reserved)
+ uhci_reserve_bandwidth(uhci, qh);
return 0;
}
@@ -1084,7 +1383,7 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
uhci_remove_tds_from_frame(uhci, qh->iso_frame);
- ctrlstat = td_status(td);
+ ctrlstat = td_status(uhci, td);
if (ctrlstat & TD_CTRL_ACTIVE) {
status = -EXDEV; /* TD was added too late? */
} else {
@@ -1096,22 +1395,18 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
qh->iso_packet_desc->actual_length = actlength;
qh->iso_packet_desc->status = status;
}
-
- if (status) {
+ if (status)
urb->error_count++;
- qh->iso_status = status;
- }
uhci_remove_td_from_urbp(td);
uhci_free_td(uhci, td);
qh->iso_frame += qh->period;
++qh->iso_packet_desc;
}
- return qh->iso_status;
+ return 0;
}
static int uhci_urb_enqueue(struct usb_hcd *hcd,
- struct usb_host_endpoint *hep,
struct urb *urb, gfp_t mem_flags)
{
int ret;
@@ -1119,23 +1414,22 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
unsigned long flags;
struct urb_priv *urbp;
struct uhci_qh *qh;
- int bustime;
spin_lock_irqsave(&uhci->lock, flags);
- ret = urb->status;
- if (ret != -EINPROGRESS) /* URB already unlinked! */
- goto done;
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto done_not_linked;
ret = -ENOMEM;
urbp = uhci_alloc_urb_priv(uhci, urb);
if (!urbp)
goto done;
- if (hep->hcpriv)
- qh = (struct uhci_qh *) hep->hcpriv;
+ if (urb->ep->hcpriv)
+ qh = urb->ep->hcpriv;
else {
- qh = uhci_alloc_qh(uhci, urb->dev, hep);
+ qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
if (!qh)
goto err_no_qh;
}
@@ -1149,42 +1443,17 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
ret = uhci_submit_bulk(uhci, urb, qh);
break;
case USB_ENDPOINT_XFER_INT:
- if (list_empty(&qh->queue)) {
- bustime = usb_check_bandwidth(urb->dev, urb);
- if (bustime < 0)
- ret = bustime;
- else {
- ret = uhci_submit_interrupt(uhci, urb, qh);
- if (ret == 0)
- usb_claim_bandwidth(urb->dev, urb, bustime, 0);
- }
- } else { /* inherit from parent */
- struct urb_priv *eurbp;
-
- eurbp = list_entry(qh->queue.prev, struct urb_priv,
- node);
- urb->bandwidth = eurbp->urb->bandwidth;
- ret = uhci_submit_interrupt(uhci, urb, qh);
- }
+ ret = uhci_submit_interrupt(uhci, urb, qh);
break;
case USB_ENDPOINT_XFER_ISOC:
urb->error_count = 0;
- bustime = usb_check_bandwidth(urb->dev, urb);
- if (bustime < 0) {
- ret = bustime;
- break;
- }
-
ret = uhci_submit_isochronous(uhci, urb, qh);
- if (ret == 0)
- usb_claim_bandwidth(urb->dev, urb, bustime, 1);
break;
}
if (ret != 0)
goto err_submit_failed;
/* Add this URB to the QH */
- urbp->qh = qh;
list_add_tail(&urbp->node, &qh->queue);
/* If the new URB is the first and only one on this QH then either
@@ -1200,27 +1469,29 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
err_submit_failed:
if (qh->state == QH_STATE_IDLE)
uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */
-
err_no_qh:
uhci_free_urb_priv(uhci, urbp);
-
done:
+ if (ret)
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+done_not_linked:
spin_unlock_irqrestore(&uhci->lock, flags);
return ret;
}
-static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
- struct urb_priv *urbp;
struct uhci_qh *qh;
+ int rc;
spin_lock_irqsave(&uhci->lock, flags);
- urbp = urb->hcpriv;
- if (!urbp) /* URB was never linked! */
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
goto done;
- qh = urbp->qh;
+
+ qh = ((struct urb_priv *) urb->hcpriv)->qh;
/* Remove Isochronous TDs from the frame list ASAP */
if (qh->type == USB_ENDPOINT_XFER_ISOC) {
@@ -1237,22 +1508,30 @@ static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
done:
spin_unlock_irqrestore(&uhci->lock, flags);
- return 0;
+ return rc;
}
/*
* Finish unlinking an URB and give it back
*/
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
- struct urb *urb)
+ struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
+ if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+
+ /* Subtract off the length of the SETUP packet from
+ * urb->actual_length.
+ */
+ urb->actual_length -= min_t(u32, 8, urb->actual_length);
+ }
+
/* When giving back the first URB in an Isochronous queue,
* reinitialize the QH's iso-related members for the next URB. */
- if (qh->type == USB_ENDPOINT_XFER_ISOC &&
+ else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
urbp->node.prev == &qh->queue &&
urbp->node.next != &qh->queue) {
struct urb *nurb = list_entry(urbp->node.next,
@@ -1260,7 +1539,6 @@ __acquires(uhci->lock)
qh->iso_packet_desc = &nurb->iso_frame_desc[0];
qh->iso_frame = nurb->start_frame;
- qh->iso_status = 0;
}
/* Take the URB off the QH's queue. If the queue is now empty,
@@ -1273,36 +1551,18 @@ __acquires(uhci->lock)
}
uhci_free_urb_priv(uhci, urbp);
-
- switch (qh->type) {
- case USB_ENDPOINT_XFER_ISOC:
- /* Release bandwidth for Interrupt or Isoc. transfers */
- if (urb->bandwidth)
- usb_release_bandwidth(urb->dev, urb, 1);
- break;
- case USB_ENDPOINT_XFER_INT:
- /* Release bandwidth for Interrupt or Isoc. transfers */
- /* Make sure we don't release if we have a queued URB */
- if (list_empty(&qh->queue) && urb->bandwidth)
- usb_release_bandwidth(urb->dev, urb, 0);
- else
- /* bandwidth was passed on to queued URB, */
- /* so don't let usb_unlink_urb() release it */
- urb->bandwidth = 0;
- break;
- }
+ usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
spin_unlock(&uhci->lock);
- usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);
+ usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
spin_lock(&uhci->lock);
/* If the queue is now empty, we can unlink the QH and give up its
* reserved bandwidth. */
if (list_empty(&qh->queue)) {
uhci_unlink_qh(uhci, qh);
-
- /* Bandwidth stuff not yet implemented */
- qh->period = 0;
+ if (qh->bandwidth_reserved)
+ uhci_release_bandwidth(uhci, qh);
}
}
@@ -1330,24 +1590,17 @@ static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
if (status == -EINPROGRESS)
break;
- spin_lock(&urb->lock);
- if (urb->status == -EINPROGRESS) /* Not dequeued */
- urb->status = status;
- else
- status = ECONNRESET; /* Not -ECONNRESET */
- spin_unlock(&urb->lock);
-
/* Dequeued but completed URBs can't be given back unless
* the QH is stopped or has finished unlinking. */
- if (status == ECONNRESET) {
+ if (urb->unlinked) {
if (QH_FINISHED_UNLINKING(qh))
qh->is_stopped = 1;
else if (!qh->is_stopped)
return;
}
- uhci_giveback_urb(uhci, qh, urb);
- if (status < 0 && qh->type != USB_ENDPOINT_XFER_ISOC)
+ uhci_giveback_urb(uhci, qh, urb, status);
+ if (status < 0)
break;
}
@@ -1362,7 +1615,7 @@ static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
restart:
list_for_each_entry(urbp, &qh->queue, node) {
urb = urbp->urb;
- if (urb->status != -EINPROGRESS) {
+ if (urb->unlinked) {
/* Fix up the TD links and save the toggles for
* non-Isochronous queues. For Isochronous queues,
@@ -1371,7 +1624,7 @@ restart:
qh->is_stopped = 0;
return;
}
- uhci_giveback_urb(uhci, qh, urb);
+ uhci_giveback_urb(uhci, qh, urb, 0);
goto restart;
}
}
@@ -1381,7 +1634,7 @@ restart:
* queue, the QH can now be re-activated. */
if (!list_empty(&qh->queue)) {
if (qh->needs_fixup)
- uhci_fixup_toggles(qh, 0);
+ uhci_fixup_toggles(uhci, qh, 0);
/* If the first URB on the queue wants FSBR but its time
* limit has expired, set the next TD to interrupt on
@@ -1391,7 +1644,7 @@ restart:
struct uhci_td *td = list_entry(urbp->td_list.next,
struct uhci_td, list);
- td->status |= __cpu_to_le32(TD_CTRL_IOC);
+ td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}
uhci_activate_qh(uhci, qh);
@@ -1438,7 +1691,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
} else {
urbp = list_entry(qh->queue.next, struct urb_priv, node);
td = list_entry(urbp->td_list.next, struct uhci_td, list);
- status = td_status(td);
+ status = td_status(uhci, td);
if (!(status & TD_CTRL_ACTIVE)) {
/* We're okay, the queue has advanced */
@@ -1446,7 +1699,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
qh->advance_jiffies = jiffies;
goto done;
}
- ret = 0;
+ ret = uhci->is_stopped;
}
/* The queue hasn't advanced; check for timeout */
@@ -1457,7 +1710,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
/* Detect the Intel bug and work around it */
if (qh->post_td && qh_element(qh) ==
- cpu_to_le32(qh->post_td->dma_handle)) {
+ LINK_TO_TD(uhci, qh->post_td)) {
qh->element = qh->post_td->link;
qh->advance_jiffies = jiffies;
ret = 1;