Diffstat (limited to 'drivers/usb/host/uhci-q.c')
-rw-r--r--  drivers/usb/host/uhci-q.c | 263
1 file changed, 148 insertions, 115 deletions
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index acd582c0280..da6f56d996c 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -29,12 +29,12 @@ static void uhci_set_next_interrupt(struct uhci_hcd *uhci) { if (uhci->is_stopped) mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies); - uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC); + uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); } static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci) { - uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC); + uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC); } @@ -53,7 +53,7 @@ static void uhci_fsbr_on(struct uhci_hcd *uhci) uhci->fsbr_is_on = 1; lqh = list_entry(uhci->skel_async_qh->node.prev, struct uhci_qh, node); - lqh->link = LINK_TO_QH(uhci->skel_term_qh); + lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh); } static void uhci_fsbr_off(struct uhci_hcd *uhci) @@ -65,7 +65,7 @@ static void uhci_fsbr_off(struct uhci_hcd *uhci) uhci->fsbr_is_on = 0; lqh = list_entry(uhci->skel_async_qh->node.prev, struct uhci_qh, node); - lqh->link = UHCI_PTR_TERM; + lqh->link = UHCI_PTR_TERM(uhci); } static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb) @@ -131,12 +131,12 @@ static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td) dma_pool_free(uhci->td_pool, td, td->dma_handle); } -static inline void uhci_fill_td(struct uhci_td *td, u32 status, - u32 token, u32 buffer) +static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td, + u32 status, u32 token, u32 buffer) { - td->status = cpu_to_le32(status); - td->token = cpu_to_le32(token); - td->buffer = cpu_to_le32(buffer); + td->status = cpu_to_hc32(uhci, status); + td->token = cpu_to_hc32(uhci, token); + td->buffer = cpu_to_hc32(uhci, buffer); } static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp) @@ -170,11 +170,11 @@ static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci, td->link = ltd->link; wmb(); - ltd->link = LINK_TO_TD(td); + ltd->link = LINK_TO_TD(uhci, td); } else { td->link = uhci->frame[framenum]; wmb(); - uhci->frame[framenum] = LINK_TO_TD(td); + uhci->frame[framenum] = LINK_TO_TD(uhci, td); uhci->frame_cpu[framenum] = td; } } @@ -195,8 +195,10 @@ static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci, } else { struct uhci_td *ntd; - ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list); - uhci->frame[td->frame] = LINK_TO_TD(ntd); + ntd = list_entry(td->fl_list.next, + struct uhci_td, + fl_list); + uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd); uhci->frame_cpu[td->frame] = ntd; } } else { @@ -253,8 +255,8 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, memset(qh, 0, sizeof(*qh)); qh->dma_handle = dma_handle; - qh->element = UHCI_PTR_TERM; - qh->link = UHCI_PTR_TERM; + qh->element = UHCI_PTR_TERM(uhci); + qh->link = UHCI_PTR_TERM(uhci); INIT_LIST_HEAD(&qh->queue); INIT_LIST_HEAD(&qh->node); @@ -278,7 +280,7 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, qh->load = usb_calc_bus_time(udev->speed, usb_endpoint_dir_in(&hep->desc), qh->type == USB_ENDPOINT_XFER_ISOC, - le16_to_cpu(hep->desc.wMaxPacketSize)) + usb_endpoint_maxp(&hep->desc)) / 1000 + 1; } else { /* Skeleton QH */ @@ -346,9 +348,9 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh, /* If the QH element pointer is UHCI_PTR_TERM then then currently * executing URB has already been unlinked, so this one isn't it. 
*/ - if (qh_element(qh) == UHCI_PTR_TERM) + if (qh_element(qh) == UHCI_PTR_TERM(uhci)) goto done; - qh->element = UHCI_PTR_TERM; + qh->element = UHCI_PTR_TERM(uhci); /* Control pipes don't have to worry about toggles */ if (qh->type == USB_ENDPOINT_XFER_CONTROL) @@ -358,7 +360,7 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh, WARN_ON(list_empty(&urbp->td_list)); td = list_entry(urbp->td_list.next, struct uhci_td, list); qh->needs_fixup = 1; - qh->initial_toggle = uhci_toggle(td_token(td)); + qh->initial_toggle = uhci_toggle(td_token(uhci, td)); done: return ret; @@ -368,7 +370,8 @@ done: * Fix up the data toggles for URBs in a queue, when one of them * terminates early (short transfer, error, or dequeued). */ -static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) +static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh, + int skip_first) { struct urb_priv *urbp = NULL; struct uhci_td *td; @@ -382,7 +385,7 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) /* When starting with the first URB, if the QH element pointer is * still valid then we know the URB's toggles are okay. */ - else if (qh_element(qh) != UHCI_PTR_TERM) + else if (qh_element(qh) != UHCI_PTR_TERM(uhci)) toggle = 2; /* Fix up the toggle for the URBs in the queue. Normally this @@ -394,15 +397,15 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) /* If the first TD has the right toggle value, we don't * need to change any toggles in this URB */ td = list_entry(urbp->td_list.next, struct uhci_td, list); - if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) { + if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) { td = list_entry(urbp->td_list.prev, struct uhci_td, list); - toggle = uhci_toggle(td_token(td)) ^ 1; + toggle = uhci_toggle(td_token(uhci, td)) ^ 1; /* Otherwise all the toggles in the URB have to be switched */ } else { list_for_each_entry(td, &urbp->td_list, list) { - td->token ^= cpu_to_le32( + td->token ^= cpu_to_hc32(uhci, TD_TOKEN_TOGGLE); toggle ^= 1; } @@ -439,7 +442,7 @@ static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) pqh = list_entry(qh->node.prev, struct uhci_qh, node); qh->link = pqh->link; wmb(); - pqh->link = LINK_TO_QH(qh); + pqh->link = LINK_TO_QH(uhci, qh); } /* @@ -449,7 +452,7 @@ static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct uhci_qh *pqh; - __le32 link_to_new_qh; + __hc32 link_to_new_qh; /* Find the predecessor QH for our new one and insert it in the list. * The list of QHs is expected to be short, so linear search won't @@ -463,7 +466,7 @@ static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh) /* Link it into the schedule */ qh->link = pqh->link; wmb(); - link_to_new_qh = LINK_TO_QH(qh); + link_to_new_qh = LINK_TO_QH(uhci, qh); pqh->link = link_to_new_qh; /* If this is now the first FSBR QH, link the terminating skeleton @@ -481,13 +484,13 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) /* Set the element pointer if it isn't set already. * This isn't needed for Isochronous queues, but it doesn't hurt. 
*/ - if (qh_element(qh) == UHCI_PTR_TERM) { + if (qh_element(qh) == UHCI_PTR_TERM(uhci)) { struct urb_priv *urbp = list_entry(qh->queue.next, struct urb_priv, node); struct uhci_td *td = list_entry(urbp->td_list.next, struct uhci_td, list); - qh->element = LINK_TO_TD(td); + qh->element = LINK_TO_TD(uhci, td); } /* Treat the queue as if it has just advanced */ @@ -531,7 +534,7 @@ static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct uhci_qh *pqh; - __le32 link_to_next_qh = qh->link; + __hc32 link_to_next_qh = qh->link; pqh = list_entry(qh->node.prev, struct uhci_qh, node); pqh->link = link_to_next_qh; @@ -565,7 +568,7 @@ static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) qh->unlink_frame = uhci->frame_number; /* Force an interrupt so we know when the QH is fully unlinked */ - if (list_empty(&uhci->skel_unlink_qh->node)) + if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped) uhci_set_next_interrupt(uhci); /* Move the QH from its old list to the end of the unlinking list */ @@ -728,7 +731,7 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, urbp->urb = urb; urb->hcpriv = urbp; - + INIT_LIST_HEAD(&urbp->node); INIT_LIST_HEAD(&urbp->td_list); @@ -755,8 +758,8 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci, /* * Map status to standard result codes * - * <status> is (td_status(td) & 0xF60000), a.k.a. - * uhci_status_bits(td_status(td)). + * <status> is (td_status(uhci, td) & 0xF60000), a.k.a. + * uhci_status_bits(td_status(uhci, td)). * Note: <status> does not include the TD_CTRL_NAK bit. * <dir_out> is True for output TDs and False for input TDs. */ @@ -789,10 +792,10 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, { struct uhci_td *td; unsigned long destination, status; - int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); + int maxsze = usb_endpoint_maxp(&qh->hep->desc); int len = urb->transfer_buffer_length; dma_addr_t data = urb->transfer_dma; - __le32 *plink; + __hc32 *plink; struct urb_priv *urbp = urb->hcpriv; int skel; @@ -809,7 +812,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, */ td = qh->dummy_td; uhci_add_td_to_urbp(td, urbp); - uhci_fill_td(td, status, destination | uhci_explen(8), + uhci_fill_td(uhci, td, status, destination | uhci_explen(8), urb->setup_dma); plink = &td->link; status |= TD_CTRL_ACTIVE; @@ -842,14 +845,14 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = LINK_TO_TD(td); + *plink = LINK_TO_TD(uhci, td); /* Alternate Data0/1 (start with Data1) */ destination ^= TD_TOKEN_TOGGLE; - + uhci_add_td_to_urbp(td, urbp); - uhci_fill_td(td, status, destination | uhci_explen(pktsze), - data); + uhci_fill_td(uhci, td, status, + destination | uhci_explen(pktsze), data); plink = &td->link; data += pktsze; @@ -857,19 +860,19 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, } /* - * Build the final TD for control status + * Build the final TD for control status */ td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = LINK_TO_TD(td); + *plink = LINK_TO_TD(uhci, td); /* Change direction for the status transaction */ destination ^= (USB_PID_IN ^ USB_PID_OUT); destination |= TD_TOKEN_TOGGLE; /* End in Data1 */ uhci_add_td_to_urbp(td, urbp); - uhci_fill_td(td, status | TD_CTRL_IOC, + uhci_fill_td(uhci, td, status | TD_CTRL_IOC, destination | uhci_explen(0), 0); 
plink = &td->link; @@ -879,11 +882,11 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = LINK_TO_TD(td); + *plink = LINK_TO_TD(uhci, td); - uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); + uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0); wmb(); - qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE); + qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE); qh->dummy_td = td; /* Low-speed transfers get a different queue, and won't hog the bus. @@ -915,12 +918,15 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, { struct uhci_td *td; unsigned long destination, status; - int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); + int maxsze = usb_endpoint_maxp(&qh->hep->desc); int len = urb->transfer_buffer_length; - dma_addr_t data = urb->transfer_dma; - __le32 *plink; + int this_sg_len; + dma_addr_t data; + __hc32 *plink; struct urb_priv *urbp = urb->hcpriv; unsigned int toggle; + struct scatterlist *sg; + int i; if (len < 0) return -EINVAL; @@ -937,12 +943,26 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, if (usb_pipein(urb->pipe)) status |= TD_CTRL_SPD; + i = urb->num_mapped_sgs; + if (len > 0 && i > 0) { + sg = urb->sg; + data = sg_dma_address(sg); + + /* urb->transfer_buffer_length may be smaller than the + * size of the scatterlist (or vice versa) + */ + this_sg_len = min_t(int, sg_dma_len(sg), len); + } else { + sg = NULL; + data = urb->transfer_dma; + this_sg_len = len; + } /* * Build the DATA TDs */ plink = NULL; td = qh->dummy_td; - do { /* Allow zero length packets */ + for (;;) { /* Allow zero length packets */ int pktsze = maxsze; if (len <= pktsze) { /* The last packet */ @@ -955,20 +975,28 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = LINK_TO_TD(td); + *plink = LINK_TO_TD(uhci, td); } uhci_add_td_to_urbp(td, urbp); - uhci_fill_td(td, status, + uhci_fill_td(uhci, td, status, destination | uhci_explen(pktsze) | (toggle << TD_TOKEN_TOGGLE_SHIFT), data); plink = &td->link; status |= TD_CTRL_ACTIVE; + toggle ^= 1; data += pktsze; + this_sg_len -= pktsze; len -= maxsze; - toggle ^= 1; - } while (len > 0); + if (this_sg_len <= 0) { + if (--i <= 0 || len <= 0) + break; + sg = sg_next(sg); + data = sg_dma_address(sg); + this_sg_len = min_t(int, sg_dma_len(sg), len); + } + } /* * URB_ZERO_PACKET means adding a 0-length packet, if direction @@ -983,10 +1011,10 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = LINK_TO_TD(td); + *plink = LINK_TO_TD(uhci, td); uhci_add_td_to_urbp(td, urbp); - uhci_fill_td(td, status, + uhci_fill_td(uhci, td, status, destination | uhci_explen(0) | (toggle << TD_TOKEN_TOGGLE_SHIFT), data); @@ -1001,7 +1029,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, * fast side but not enough to justify delaying an interrupt * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT * flag setting. 
*/ - td->status |= cpu_to_le32(TD_CTRL_IOC); + td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); /* * Build the new dummy TD and activate the old one @@ -1009,11 +1037,11 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, td = uhci_alloc_td(uhci); if (!td) goto nomem; - *plink = LINK_TO_TD(td); + *plink = LINK_TO_TD(uhci, td); - uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); + uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0); wmb(); - qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE); + qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE); qh->dummy_td = td; usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), @@ -1106,7 +1134,7 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci, * the queue at the status stage transaction, which is * the last TD. */ WARN_ON(list_empty(&urbp->td_list)); - qh->element = LINK_TO_TD(td); + qh->element = LINK_TO_TD(uhci, td); tmp = td->list.prev; ret = -EINPROGRESS; @@ -1115,8 +1143,9 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci, /* When a bulk/interrupt transfer is short, we have to * fix up the toggles of the following URBs on the queue * before restarting the queue at the next URB. */ - qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1; - uhci_fixup_toggles(qh, 1); + qh->initial_toggle = + uhci_toggle(td_token(uhci, qh->post_td)) ^ 1; + uhci_fixup_toggles(uhci, qh, 1); if (list_empty(&urbp->td_list)) td = qh->post_td; @@ -1151,7 +1180,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) unsigned int ctrlstat; int len; - ctrlstat = td_status(td); + ctrlstat = td_status(uhci, td); status = uhci_status_bits(ctrlstat); if (status & TD_CTRL_ACTIVE) return -EINPROGRESS; @@ -1161,7 +1190,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) if (status) { ret = uhci_map_status(status, - uhci_packetout(td_token(td))); + uhci_packetout(td_token(uhci, td))); if ((debug == 1 && ret != -EPIPE) || debug > 1) { /* Some debugging code */ dev_dbg(&urb->dev->dev, @@ -1171,13 +1200,13 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) if (debug > 1 && errbuf) { /* Print the chain for debugging */ uhci_show_qh(uhci, urbp->qh, errbuf, - ERRBUF_LEN, 0); + ERRBUF_LEN - EXTRA_SPACE, 0); lprintk(errbuf); } } /* Did we receive a short packet? 
*/ - } else if (len < uhci_expected_length(td_token(td))) { + } else if (len < uhci_expected_length(td_token(uhci, td))) { /* For control transfers, go to the status TD if * this isn't already the last data TD */ @@ -1209,10 +1238,10 @@ err: if (ret < 0) { /* Note that the queue has stopped and save * the next toggle value */ - qh->element = UHCI_PTR_TERM; + qh->element = UHCI_PTR_TERM(uhci); qh->is_stopped = 1; qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL); - qh->initial_toggle = uhci_toggle(td_token(td)) ^ + qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^ (ret == -EREMOTEIO); } else /* Short packet received */ @@ -1227,7 +1256,8 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, struct uhci_qh *qh) { struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */ - int i, frame; + int i; + unsigned frame, next; unsigned long destination, status; struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; @@ -1236,37 +1266,29 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, urb->number_of_packets >= UHCI_NUMFRAMES) return -EFBIG; + uhci_get_current_frame_number(uhci); + /* Check the period and figure out the starting frame number */ if (!qh->bandwidth_reserved) { qh->period = urb->interval; - if (urb->transfer_flags & URB_ISO_ASAP) { - qh->phase = -1; /* Find the best phase */ - i = uhci_check_bandwidth(uhci, qh); - if (i) - return i; - - /* Allow a little time to allocate the TDs */ - uhci_get_current_frame_number(uhci); - frame = uhci->frame_number + 10; - - /* Move forward to the first frame having the - * correct phase */ - urb->start_frame = frame + ((qh->phase - frame) & - (qh->period - 1)); - } else { - i = urb->start_frame - uhci->last_iso_frame; - if (i <= 0 || i >= UHCI_NUMFRAMES) - return -EINVAL; - qh->phase = urb->start_frame & (qh->period - 1); - i = uhci_check_bandwidth(uhci, qh); - if (i) - return i; - } + qh->phase = -1; /* Find the best phase */ + i = uhci_check_bandwidth(uhci, qh); + if (i) + return i; + + /* Allow a little time to allocate the TDs */ + next = uhci->frame_number + 10; + frame = qh->phase; + + /* Round up to the first available slot */ + frame += (next - frame + qh->period - 1) & -qh->period; } else if (qh->period != urb->interval) { return -EINVAL; /* Can't change the period */ } else { + next = uhci->frame_number + 1; + /* Find the next unused frame */ if (list_empty(&qh->queue)) { frame = qh->iso_frame; @@ -1279,25 +1301,35 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, lurb->number_of_packets * lurb->interval; } - if (urb->transfer_flags & URB_ISO_ASAP) { - /* Skip some frames if necessary to insure - * the start frame is in the future. + + /* Fell behind? */ + if (!uhci_frame_before_eq(next, frame)) { + + /* USB_ISO_ASAP: Round up to the first available slot */ + if (urb->transfer_flags & URB_ISO_ASAP) + frame += (next - frame + qh->period - 1) & + -qh->period; + + /* + * Not ASAP: Use the next slot in the stream, + * no matter what. 
*/ - uhci_get_current_frame_number(uhci); - if (uhci_frame_before_eq(frame, uhci->frame_number)) { - frame = uhci->frame_number + 1; - frame += ((qh->phase - frame) & - (qh->period - 1)); - } - } /* Otherwise pick up where the last URB leaves off */ - urb->start_frame = frame; + else if (!uhci_frame_before_eq(next, + frame + (urb->number_of_packets - 1) * + qh->period)) + dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n", + urb, frame, + (urb->number_of_packets - 1) * + qh->period, + next); + } } /* Make sure we won't have to go too far into the future */ if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES, - urb->start_frame + urb->number_of_packets * - urb->interval)) + frame + urb->number_of_packets * urb->interval)) return -EFBIG; + urb->start_frame = frame; status = TD_CTRL_ACTIVE | TD_CTRL_IOS; destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); @@ -1308,14 +1340,14 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, return -ENOMEM; uhci_add_td_to_urbp(td, urbp); - uhci_fill_td(td, status, destination | + uhci_fill_td(uhci, td, status, destination | uhci_explen(urb->iso_frame_desc[i].length), urb->transfer_dma + urb->iso_frame_desc[i].offset); } /* Set the interrupt-on-completion flag on the last packet. */ - td->status |= cpu_to_le32(TD_CTRL_IOC); + td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); /* Add the TDs to the frame list */ frame = urb->start_frame; @@ -1351,7 +1383,7 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb) uhci_remove_tds_from_frame(uhci, qh->iso_frame); - ctrlstat = td_status(td); + ctrlstat = td_status(uhci, td); if (ctrlstat & TD_CTRL_ACTIVE) { status = -EXDEV; /* TD was added too late? */ } else { @@ -1602,7 +1634,7 @@ restart: * queue, the QH can now be re-activated. */ if (!list_empty(&qh->queue)) { if (qh->needs_fixup) - uhci_fixup_toggles(qh, 0); + uhci_fixup_toggles(uhci, qh, 0); /* If the first URB on the queue wants FSBR but its time * limit has expired, set the next TD to interrupt on @@ -1612,7 +1644,7 @@ restart: struct uhci_td *td = list_entry(urbp->td_list.next, struct uhci_td, list); - td->status |= __cpu_to_le32(TD_CTRL_IOC); + td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); } uhci_activate_qh(uhci, qh); @@ -1659,7 +1691,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) } else { urbp = list_entry(qh->queue.next, struct urb_priv, node); td = list_entry(urbp->td_list.next, struct uhci_td, list); - status = td_status(td); + status = td_status(uhci, td); if (!(status & TD_CTRL_ACTIVE)) { /* We're okay, the queue has advanced */ @@ -1667,7 +1699,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) qh->advance_jiffies = jiffies; goto done; } - ret = 0; + ret = uhci->is_stopped; } /* The queue hasn't advanced; check for timeout */ @@ -1677,7 +1709,8 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) { /* Detect the Intel bug and work around it */ - if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) { + if (qh->post_td && qh_element(qh) == + LINK_TO_TD(uhci, qh->post_td)) { qh->element = qh->post_td->link; qh->advance_jiffies = jiffies; ret = 1; |
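Most of the mechanical churn in the diff above converts descriptor accesses from cpu_to_le32()/__le32 to cpu_to_hc32()/__hc32, so TDs and QHs can be built in whichever byte order the host controller expects, and macros such as LINK_TO_TD(), LINK_TO_QH() and UHCI_PTR_TERM() now take the uhci_hcd pointer for the same reason. Below is a minimal sketch of how such per-controller helpers are commonly defined; the big_endian_desc flag and the exact definitions are assumptions for illustration, not a copy of uhci-hcd.h.

/* Sketch only: byte-order helpers chosen per controller instance.
 * The big_endian_desc flag is an assumed field; the real helpers live
 * in uhci-hcd.h and may compile down to plain little-endian accessors
 * when no big-endian-descriptor hardware is configured.
 * (__hc32 may already be provided by the USB HCD headers.)
 */
typedef __u32 __bitwise __hc32;

static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, u32 x)
{
	return uhci->big_endian_desc ? (__force __hc32)cpu_to_be32(x)
				     : (__force __hc32)cpu_to_le32(x);
}

static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, __hc32 x)
{
	return uhci->big_endian_desc ? be32_to_cpu((__force __be32)x)
				     : le32_to_cpu((__force __le32)x);
}

/* The link macros then become uhci-aware, e.g. (assumed form):
 *	LINK_TO_TD(uhci, td)  ==  cpu_to_hc32(uhci, (td)->dma_handle)
 */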
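The uhci_submit_common() hunks add native scatter-gather support: instead of assuming one contiguous urb->transfer_dma buffer, the TD-building loop now walks urb->sg segment by segment via sg_dma_address()/sg_dma_len(), capping each segment at the remaining transfer length. The sketch below shows only the generic shape of such a walk; emit_chunk() is a hypothetical placeholder, and the real loop (visible in the diff) interleaves this walk with splitting each segment into maxpacket-sized TDs.

/* Generic mapped-scatterlist walk, for readers unfamiliar with the
 * API used above.  Not the driver's exact loop.
 */
#include <linux/kernel.h>
#include <linux/scatterlist.h>

extern void emit_chunk(dma_addr_t addr, int len);	/* hypothetical */

static void walk_segments(struct scatterlist *sgl, int num_mapped_sgs,
			  int total_len)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, num_mapped_sgs, i) {
		dma_addr_t addr = sg_dma_address(sg);
		int seg_len = min_t(int, sg_dma_len(sg), total_len);

		emit_chunk(addr, seg_len);
		total_len -= seg_len;
		if (total_len <= 0)
			break;
	}
}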
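The isochronous changes drop the old URB_ISO_ASAP special cases in favour of a single rounding step, frame += (next - frame + qh->period - 1) & -qh->period, which, for a power-of-two period, advances frame to the first slot at or after next that still has the reserved phase. A standalone illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned period = 8;	/* qh->period: a power of two for iso */
	unsigned frame  = 112;	/* a slot with the reserved phase */
	unsigned next   = 125;	/* earliest frame the HC can still reach */

	/* Round the gap up to a whole number of periods. */
	frame += (next - frame + period - 1) & -period;

	printf("start frame = %u\n", frame);	/* 128: >= 125, same phase as 112 */
	return 0;
}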
