Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 90
1 file changed, 63 insertions, 27 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7113d16e2d3..54139a2f06c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 			(unsigned long long) addr);
 }
 
+/* flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
+ */
 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		struct xhci_td *cur_td)
+		struct xhci_td *cur_td, bool flip_cycle)
 {
 	struct xhci_segment *cur_seg;
 	union xhci_trb *cur_trb;
@@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			 * leave the pointers intact.
 			 */
 			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
+			/* Flip the cycle bit (link TRBs can't be the first
+			 * or last TRB).
+			 */
+			if (flip_cycle)
+				cur_trb->generic.field[3] ^=
+					cpu_to_le32(TRB_CYCLE);
 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
 					"in seg %p (0x%llx dma)\n",
@@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			cur_trb->generic.field[2] = 0;
 			/* Preserve only the cycle bit of this TRB */
 			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+			/* Flip the cycle bit except on the first or last TRB */
+			if (flip_cycle && cur_trb != cur_td->first_trb &&
+					cur_trb != cur_td->last_trb)
+				cur_trb->generic.field[3] ^=
+					cpu_to_le32(TRB_CYCLE);
 			cur_trb->generic.field[3] |= cpu_to_le32(
 				TRB_TYPE(TRB_TR_NOOP));
 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
@@ -719,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 					cur_td->urb->stream_id,
 					cur_td, &deq_state);
 		else
-			td_to_noop(xhci, ep_ring, cur_td);
+			td_to_noop(xhci, ep_ring, cur_td, false);
 remove_finished_td:
 		/*
 		 * The event handler won't see a completion for this TD anymore,
 		 * so remove it from the endpoint ring's TD list.  Keep it in
 		 * the cancelled TD list for URB completion later.
 		 */
-		list_del(&cur_td->td_list);
+		list_del_init(&cur_td->td_list);
 	}
 	last_unlinked_td = cur_td;
 	xhci_stop_watchdog_timer_in_irq(xhci, ep);
@@ -754,7 +769,7 @@ remove_finished_td:
 	do {
 		cur_td = list_entry(ep->cancelled_td_list.next,
 				struct xhci_td, cancelled_td_list);
-		list_del(&cur_td->cancelled_td_list);
+		list_del_init(&cur_td->cancelled_td_list);
 
 		/* Clean up the cancelled URB */
 		/* Doesn't matter what we pass for status, since the core will
@@ -862,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 				cur_td = list_first_entry(&ring->td_list,
 						struct xhci_td,
 						td_list);
-				list_del(&cur_td->td_list);
+				list_del_init(&cur_td->td_list);
 				if (!list_empty(&cur_td->cancelled_td_list))
-					list_del(&cur_td->cancelled_td_list);
+					list_del_init(&cur_td->cancelled_td_list);
 				xhci_giveback_urb_in_irq(xhci, cur_td,
 						-ESHUTDOWN, "killed");
 			}
@@ -873,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 						&temp_ep->cancelled_td_list,
 						struct xhci_td,
 						cancelled_td_list);
-				list_del(&cur_td->cancelled_td_list);
+				list_del_init(&cur_td->cancelled_td_list);
 				xhci_giveback_urb_in_irq(xhci, cur_td,
 						-ESHUTDOWN, "killed");
 			}
@@ -1565,10 +1580,10 @@ td_cleanup:
 			else
 				*status = 0;
 		}
-		list_del(&td->td_list);
+		list_del_init(&td->td_list);
 		/* Was this TD slated to be cancelled but completed anyway? */
 		if (!list_empty(&td->cancelled_td_list))
-			list_del(&td->cancelled_td_list);
+			list_del_init(&td->cancelled_td_list);
 
 		urb_priv->td_cnt++;
 		/* Giveback the urb when all the tds are completed */
@@ -2500,11 +2515,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
 	if (td_index == 0) {
 		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
-		if (unlikely(ret)) {
-			xhci_urb_free_priv(xhci, urb_priv);
-			urb->hcpriv = NULL;
+		if (unlikely(ret))
 			return ret;
-		}
 	}
 
 	td->urb = urb;
@@ -2672,6 +2684,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
 {
 	int packets_transferred;
 
+	/* One TRB with a zero-length data packet. */
+	if (running_total == 0 && trb_buff_len == 0)
+		return 0;
+
 	/* All the TRB queueing functions don't count the current TRB in
 	 * running_total.
 	 */
@@ -3113,20 +3129,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
 		struct urb *urb, int i)
 {
 	int num_trbs = 0;
-	u64 addr, td_len, running_total;
+	u64 addr, td_len;
 
 	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
 	td_len = urb->iso_frame_desc[i].length;
 
-	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
-	running_total &= TRB_MAX_BUFF_SIZE - 1;
-	if (running_total != 0)
-		num_trbs++;
-
-	while (running_total < td_len) {
+	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+			TRB_MAX_BUFF_SIZE);
+	if (num_trbs == 0)
 		num_trbs++;
-		running_total += TRB_MAX_BUFF_SIZE;
-	}
 
 	return num_trbs;
 }
@@ -3226,6 +3237,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	start_trb = &ep_ring->enqueue->generic;
 	start_cycle = ep_ring->cycle_state;
 
+	urb_priv = urb->hcpriv;
 	/* Queue the first TRB, even if it's zero-length */
 	for (i = 0; i < num_tds; i++) {
 		unsigned int total_packet_count;
@@ -3237,9 +3249,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		addr = start_addr + urb->iso_frame_desc[i].offset;
 		td_len = urb->iso_frame_desc[i].length;
 		td_remain_len = td_len;
-		/* FIXME: Ignoring zero-length packets, can those happen? */
 		total_packet_count = roundup(td_len,
 				le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+		/* A zero-length transfer still involves at least one packet. */
+		if (total_packet_count == 0)
+			total_packet_count++;
 		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
 				total_packet_count);
 		residue = xhci_get_last_burst_packet_count(xhci,
@@ -3249,12 +3263,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
 				urb->stream_id, trbs_per_td, urb, i, mem_flags);
-		if (ret < 0)
-			return ret;
+		if (ret < 0) {
+			if (i == 0)
+				return ret;
+			goto cleanup;
+		}
 
-		urb_priv = urb->hcpriv;
 		td = urb_priv->td[i];
-
 		for (j = 0; j < trbs_per_td; j++) {
 			u32 remainder = 0;
 			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3344,6 +3359,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 			start_cycle, start_trb);
 	return 0;
+cleanup:
+	/* Clean up a partially enqueued isoc transfer. */
+
+	for (i--; i >= 0; i--)
+		list_del_init(&urb_priv->td[i]->td_list);
+
+	/* Use the first TD as a temporary variable to turn the TDs we've queued
+	 * into No-ops with a software-owned cycle bit.  That way the hardware
+	 * won't accidentally start executing bogus TDs when we partially
+	 * overwrite them.  td->first_trb and td->start_seg are already set.
+	 */
+	urb_priv->td[0]->last_trb = ep_ring->enqueue;
+	/* Every TRB except the first & last will have its cycle bit flipped. */
+	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
+
+	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
+	ep_ring->enqueue = urb_priv->td[0]->first_trb;
+	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
+	ep_ring->cycle_state = start_cycle;
+	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+	return ret;
 }
 
 /*
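Note (not part of the patch): a minimal user-space sketch of the TRB-count arithmetic that the reworked count_isoc_trbs_needed() expresses — the number of TRBs for an isoc frame is the number of per-TRB buffer chunks that [addr, addr + td_len) touches, with a minimum of one TRB for a zero-length frame. The TRB_MAX_BUFF_SIZE value (assumed here to be the driver's 64 KB per-TRB limit), the count_trbs() helper, and the example values are illustrative assumptions, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Assumed 64 KB per-TRB buffer limit, for illustration. */
#define TRB_MAX_BUFF_SIZE	(1 << 16)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same computation as the patched count_isoc_trbs_needed(): count how many
 * TRB_MAX_BUFF_SIZE-aligned chunks the frame buffer spans, and always use
 * at least one TRB.
 */
static unsigned int count_trbs(uint64_t addr, uint64_t td_len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;
	return num_trbs;
}

int main(void)
{
	/* Example values, chosen only to exercise the edge cases. */
	printf("%u\n", count_trbs(0x10000, 0));    /* zero-length, aligned: bumped to 1 */
	printf("%u\n", count_trbs(0x1000, 1024));  /* fits in one TRB: 1 */
	printf("%u\n", count_trbs(0xff00, 1024));  /* crosses a 64 KB boundary: 2 */
	return 0;
}

Both the old boundary-walking loop and the DIV_ROUND_UP form count the same chunks; the closed form just drops the loop and the running_total variable.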