Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c | 987
1 file changed, 458 insertions(+), 529 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6bfbd80ec2b..749fc68eb5c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -69,10 +69,6 @@
#include "xhci.h"
#include "xhci-trace.h"
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
- struct xhci_virt_device *virt_dev,
- struct xhci_event_cmd *event);
-
/*
* Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
* address of the TRB.
@@ -123,16 +119,6 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
return TRB_TYPE_LINK_LE32(link->control);
}
-union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
-{
- /* Enqueue pointer can be left pointing to the link TRB,
- * we must handle that
- */
- if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
- return ring->enq_seg->next->trbs;
- return ring->enqueue;
-}
-
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* effect the ring dequeue or enqueue pointers.
@@ -156,8 +142,6 @@ static void next_trb(struct xhci_hcd *xhci,
*/
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- unsigned long long addr;
-
ring->deq_updates++;
/*
@@ -178,7 +162,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->type == TYPE_EVENT &&
last_trb_on_last_seg(xhci, ring,
ring->deq_seg, ring->dequeue)) {
- ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ ring->cycle_state ^= 1;
}
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
@@ -186,8 +170,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring->dequeue++;
}
} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
-
- addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}
/*
@@ -212,7 +194,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
{
u32 chain;
union xhci_trb *next;
- unsigned long long addr;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
/* If this is not event ring, there is one less usable TRB */
@@ -264,7 +245,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
}
- addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
}
/*
@@ -295,9 +275,9 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
return;
xhci_dbg(xhci, "// Ding dong!\n");
- xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
+ writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
- xhci_readl(xhci, &xhci->dba->doorbell[0]);
+ readl(&xhci->dba->doorbell[0]);
}
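
The xhci_writel()/xhci_readl() wrappers are being replaced by the plain writel()/readl() accessors throughout this patch; the read-back of the doorbell register that follows the write is what flushes posted PCI writes. A minimal sketch of the same pattern, with db_reg standing in for a real ioremap()ed doorbell register (hypothetical name, not from this patch):

	/* Sketch only: ring a doorbell, then flush the posted write. */
	static void ring_db(void __iomem *db_reg, u32 value)
	{
		writel(value, db_reg);	/* may linger in a PCI write buffer */
		readl(db_reg);		/* read-back forces it out to the xHC */
	}
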
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
@@ -307,17 +287,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
xhci_dbg(xhci, "Abort command ring\n");
- if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
- xhci_dbg(xhci, "The command ring isn't running, "
- "Have the command ring been stopped?\n");
- return 0;
- }
-
temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- if (!(temp_64 & CMD_RING_RUNNING)) {
- xhci_dbg(xhci, "Command ring had been stopped\n");
- return 0;
- }
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
&xhci->op_regs->cmd_ring);
@@ -343,71 +313,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
return 0;
}
-static int xhci_queue_cd(struct xhci_hcd *xhci,
- struct xhci_command *command,
- union xhci_trb *cmd_trb)
-{
- struct xhci_cd *cd;
- cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
- if (!cd)
- return -ENOMEM;
- INIT_LIST_HEAD(&cd->cancel_cmd_list);
-
- cd->command = command;
- cd->cmd_trb = cmd_trb;
- list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
-
- return 0;
-}
-
-/*
- * Cancel the command which has issue.
- *
- * Some commands may hang due to waiting for acknowledgement from
- * usb device. It is outside of the xHC's ability to control and
- * will cause the command ring is blocked. When it occurs software
- * should intervene to recover the command ring.
- * See Section 4.6.1.1 and 4.6.1.2
- */
-int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
- union xhci_trb *cmd_trb)
-{
- int retval = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&xhci->lock, flags);
-
- if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_warn(xhci, "Abort the command ring,"
- " but the xHCI is dead.\n");
- retval = -ESHUTDOWN;
- goto fail;
- }
-
- /* queue the cmd desriptor to cancel_cmd_list */
- retval = xhci_queue_cd(xhci, command, cmd_trb);
- if (retval) {
- xhci_warn(xhci, "Queuing command descriptor failed.\n");
- goto fail;
- }
-
- /* abort command ring */
- retval = xhci_abort_cmd_ring(xhci);
- if (retval) {
- xhci_err(xhci, "Abort command ring failed\n");
- if (unlikely(retval == -ESHUTDOWN)) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
- xhci_dbg(xhci, "xHCI host controller is dead.\n");
- return retval;
- }
- }
-
-fail:
- spin_unlock_irqrestore(&xhci->lock, flags);
- return retval;
-}
-
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index,
@@ -427,7 +332,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED))
return;
- xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+ writel(DB_VALUE(ep_index, stream_id), db_addr);
/* The CPU has better things to do at this point than wait for a
* write-posting flush. It'll get there soon enough.
*/
@@ -552,10 +457,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
- struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
+ u64 hw_dequeue;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
ep_index, stream_id);
@@ -565,56 +471,65 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
stream_id);
return;
}
- state->new_cycle_state = 0;
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Finding segment containing stopped TRB.");
- state->new_deq_seg = find_trb_seg(cur_td->start_seg,
- dev->eps[ep_index].stopped_trb,
- &state->new_cycle_state);
- if (!state->new_deq_seg) {
- WARN_ON(1);
- return;
- }
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding endpoint context");
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+ /* 4.6.9 the css flag is written to the stream context for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ hw_dequeue = le64_to_cpu(ctx->stream_ring);
+ } else {
+ struct xhci_ep_ctx *ep_ctx
+ = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ hw_dequeue = le64_to_cpu(ep_ctx->deq);
+ }
+
+ /* Find virtual address and segment of hardware dequeue pointer */
+ state->new_deq_seg = ep_ring->deq_seg;
+ state->new_deq_ptr = ep_ring->dequeue;
+ while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+ != (dma_addr_t)(hw_dequeue & ~0xf)) {
+ next_trb(xhci, ep_ring, &state->new_deq_seg,
+ &state->new_deq_ptr);
+ if (state->new_deq_ptr == ep_ring->dequeue) {
+ WARN_ON(1);
+ return;
+ }
+ }
+ /*
+ * Find cycle state for last_trb, starting at old cycle state of
+ * hw_dequeue. If there is only one segment ring, find_trb_seg() will
+ * return immediately and cannot toggle the cycle state if this search
+ * wraps around, so add one more toggle manually in that case.
+ */
+ state->new_cycle_state = hw_dequeue & 0x1;
+ if (ep_ring->first_seg == ep_ring->first_seg->next &&
+ cur_td->last_trb < state->new_deq_ptr)
+ state->new_cycle_state ^= 0x1;
state->new_deq_ptr = cur_td->last_trb;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding segment containing last TRB in TD.");
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
- state->new_deq_ptr,
- &state->new_cycle_state);
+ state->new_deq_ptr, &state->new_cycle_state);
if (!state->new_deq_seg) {
WARN_ON(1);
return;
}
+ /* Increment to find next TRB after last_trb. Cycle if appropriate. */
trb = &state->new_deq_ptr->generic;
if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
(trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
- /*
- * If there is only one segment in a ring, find_trb_seg()'s while loop
- * will not run, and it will return before it has a chance to see if it
- * needs to toggle the cycle bit. It can't tell if the stalled transfer
- * ended just before the link TRB on a one-segment ring, or if the TD
- * wrapped around the top of the ring, because it doesn't have the TD in
- * question. Look for the one-segment case where stalled TRB's address
- * is greater than the new dequeue pointer address.
- */
- if (ep_ring->first_seg == ep_ring->first_seg->next &&
- state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
- state->new_cycle_state ^= 0x1;
+ /* Don't update the ring cycle state for the producer (us). */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cycle state = 0x%x", state->new_cycle_state);
- /* Don't update the ring cycle state for the producer (us). */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"New dequeue segment = %p (virtual)",
state->new_deq_seg);
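
xhci_find_new_dequeue_state() now starts from the 64-bit dequeue pointer the xHC saved into the endpoint context (or, for stream endpoints, the stream context) when the endpoint was stopped. Bit 0 of that value is the consumer cycle state, and TRBs are 16-byte aligned, so the DMA address is recovered by masking off the low four bits before walking the ring with next_trb() until xhci_trb_virt_to_dma() matches. A condensed sketch of just the decoding step (helper name is hypothetical):

	/* Sketch: split the saved hardware dequeue pointer into its parts. */
	static void decode_hw_dequeue(u64 hw_dequeue, dma_addr_t *addr, u32 *cycle)
	{
		*cycle = hw_dequeue & 0x1;			/* consumer cycle state (DCS) */
		*addr = (dma_addr_t)(hw_dequeue & ~0xfULL);	/* 16-byte aligned TRB address */
	}
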
@@ -680,12 +595,14 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
}
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_command *cmd,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
struct xhci_dequeue_state *deq_state)
@@ -700,7 +617,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
deq_state->new_deq_ptr,
(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
deq_state->new_cycle_state);
- queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
+ queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
deq_state->new_deq_seg,
deq_state->new_deq_ptr,
(u32) deq_state->new_cycle_state);
@@ -726,7 +643,7 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
- struct xhci_td *cur_td, int status, char *adjective)
+ struct xhci_td *cur_td, int status)
{
struct usb_hcd *hcd;
struct urb *urb;
@@ -765,12 +682,10 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
* 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
* bit cleared) so that the HW will skip over them.
*/
-static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, struct xhci_event_cmd *event)
{
- unsigned int slot_id;
unsigned int ep_index;
- struct xhci_virt_device *virt_dev;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct list_head *entry;
@@ -779,15 +694,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
struct xhci_dequeue_state deq_state;
- if (unlikely(TRB_TO_SUSPEND_PORT(
- le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
- slot_id = TRB_TO_SLOT_ID(
- le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
- virt_dev = xhci->devs[slot_id];
- if (virt_dev)
- handle_cmd_in_cmd_wait_list(xhci, virt_dev,
- event);
- else
+ if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
+ if (!xhci->devs[slot_id])
xhci_warn(xhci, "Stop endpoint command "
"completion for disabled slot %u\n",
slot_id);
@@ -795,14 +703,12 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
}
memset(&deq_state, 0, sizeof(deq_state));
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
ep = &xhci->devs[slot_id]->eps[ep_index];
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -860,7 +766,9 @@ remove_finished_td:
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
- xhci_queue_new_dequeue_state(xhci,
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ xhci_queue_new_dequeue_state(xhci, command,
slot_id, ep_index,
ep->stopped_td->urb->stream_id,
&deq_state);
@@ -870,11 +778,9 @@ remove_finished_td:
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
- /* Clear stopped_td and stopped_trb if endpoint is not halted */
- if (!(ep->ep_state & EP_HALTED)) {
+ /* Clear stopped_td if endpoint is not halted */
+ if (!(ep->ep_state & EP_HALTED))
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
- }
/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -891,7 +797,7 @@ remove_finished_td:
/* Doesn't matter what we pass for status, since the core will
* just overwrite it (because the URB has been unlinked).
*/
- xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0);
/* Stop processing the cancelled list if the watchdog timer is
* running.
@@ -903,6 +809,57 @@ remove_finished_td:
/* Return to the event handler with xhci->lock re-acquired */
}
+static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_td *cur_td;
+
+ while (!list_empty(&ring->td_list)) {
+ cur_td = list_first_entry(&ring->td_list,
+ struct xhci_td, td_list);
+ list_del_init(&cur_td->td_list);
+ if (!list_empty(&cur_td->cancelled_td_list))
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
+
+static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
+ int slot_id, int ep_index)
+{
+ struct xhci_td *cur_td;
+ struct xhci_virt_ep *ep;
+ struct xhci_ring *ring;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ if ((ep->ep_state & EP_HAS_STREAMS) ||
+ (ep->ep_state & EP_GETTING_NO_STREAMS)) {
+ int stream_id;
+
+ for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ stream_id++) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u, stream %u",
+ slot_id, ep_index, stream_id + 1);
+ xhci_kill_ring_urbs(xhci,
+ ep->stream_info->stream_rings[stream_id]);
+ }
+ } else {
+ ring = ep->ring;
+ if (!ring)
+ return;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u",
+ slot_id, ep_index);
+ xhci_kill_ring_urbs(xhci, ring);
+ }
+ while (!list_empty(&ep->cancelled_td_list)) {
+ cur_td = list_first_entry(&ep->cancelled_td_list,
+ struct xhci_td, cancelled_td_list);
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
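
Both new helpers drain their lists with the list_first_entry()/list_del_init() pattern. Using list_del_init() rather than list_del() re-initialises the node after unlinking, which is what keeps the per-TD !list_empty(&cur_td->cancelled_td_list) test meaningful. A standalone sketch of the idiom over a hypothetical item type:

	#include <linux/list.h>

	struct item {
		struct list_head node;
	};

	/* Sketch: drain a list, leaving each unlinked node self-pointing
	 * so a later list_empty(&it->node) reports "not on any list".
	 */
	static void drain(struct list_head *head)
	{
		struct item *it;

		while (!list_empty(head)) {
			it = list_first_entry(head, struct item, node);
			list_del_init(&it->node);
			/* ... give the item back or free it ... */
		}
	}
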
+
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@@ -926,9 +883,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
struct xhci_hcd *xhci;
struct xhci_virt_ep *ep;
- struct xhci_virt_ep *temp_ep;
- struct xhci_ring *ring;
- struct xhci_td *cur_td;
int ret, i, j;
unsigned long flags;
@@ -985,34 +939,8 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
- for (j = 0; j < 31; j++) {
- temp_ep = &xhci->devs[i]->eps[j];
- ring = temp_ep->ring;
- if (!ring)
- continue;
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Killing URBs for slot ID %u, "
- "ep index %u", i, j);
- while (!list_empty(&ring->td_list)) {
- cur_td = list_first_entry(&ring->td_list,
- struct xhci_td,
- td_list);
- list_del_init(&cur_td->td_list);
- if (!list_empty(&cur_td->cancelled_td_list))
- list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN, "killed");
- }
- while (!list_empty(&temp_ep->cancelled_td_list)) {
- cur_td = list_first_entry(
- &temp_ep->cancelled_td_list,
- struct xhci_td,
- cancelled_td_list);
- list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN, "killed");
- }
- }
+ for (j = 0; j < 31; j++)
+ xhci_kill_endpoint_urbs(xhci, i, j);
}
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1077,27 +1005,25 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
* endpoint doorbell to restart the ring, but only if there aren't more
* cancellations pending.
*/
-static void handle_set_deq_completion(struct xhci_hcd *xhci,
- struct xhci_event_cmd *event,
- union xhci_trb *trb)
+static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ union xhci_trb *trb, u32 cmd_comp_code)
{
- unsigned int slot_id;
unsigned int ep_index;
unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
+ struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
+ ep = &dev->eps[ep_index];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
- xhci_warn(xhci, "WARN Set TR deq ptr command for "
- "freed stream ID %u\n",
+ xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
/* XXX: Harmless??? */
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
@@ -1107,18 +1033,16 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
- if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
+ if (cmd_comp_code != COMP_SUCCESS) {
unsigned int ep_state;
unsigned int slot_state;
- switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
+ switch (cmd_comp_code) {
case COMP_TRB_ERR:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
- "of stream ID configuration\n");
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
break;
case COMP_CTX_STATE:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
- "to incorrect slot or ep state.\n");
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
ep_state = le32_to_cpu(ep_ctx->ep_info);
ep_state &= EP_STATE_MASK;
slot_state = le32_to_cpu(slot_ctx->dev_state);
@@ -1128,13 +1052,12 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
slot_state, ep_state);
break;
case COMP_EBADSLT:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
- "slot %u was not enabled.\n", slot_id);
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
+ slot_id);
break;
default:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
- "completion code of %u.\n",
- GET_COMP_CODE(le32_to_cpu(event->status)));
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
+ cmd_comp_code);
break;
}
/* OK what do we do now? The endpoint state is hosed, and we
@@ -1144,23 +1067,28 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
* cancelling URBs, which might not be an error...
*/
} else {
+ u64 deq;
+ /* 4.6.10 deq ptr is written to the stream ctx for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+ } else {
+ deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+ }
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Successful Set TR Deq Ptr cmd, deq = @%08llx",
- le64_to_cpu(ep_ctx->deq));
- if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
- dev->eps[ep_index].queued_deq_ptr) ==
- (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
+ "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
+ if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
+ ep->queued_deq_ptr) == deq) {
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
update_ring_for_set_deq_completion(xhci, dev,
ep_ring, ep_index);
} else {
- xhci_warn(xhci, "Mismatch between completed Set TR Deq "
- "Ptr command & xHCI internal state.\n");
+ xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
- dev->eps[ep_index].queued_deq_seg,
- dev->eps[ep_index].queued_deq_ptr);
+ ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
@@ -1171,30 +1099,28 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
-static void handle_reset_ep_completion(struct xhci_hcd *xhci,
- struct xhci_event_cmd *event,
- union xhci_trb *trb)
+static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
+ union xhci_trb *trb, u32 cmd_comp_code)
{
- int slot_id;
unsigned int ep_index;
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
- "Ignoring reset ep completion code of %u",
- GET_COMP_CODE(le32_to_cpu(event->status)));
+ "Ignoring reset ep completion code of %u", cmd_comp_code);
/* HW with the reset endpoint quirk needs to have a configure endpoint
* command complete before the endpoint can be used. Queue that here
* because the HW can't handle two commands being queued in a row.
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Queueing configure endpoint command");
- xhci_queue_configure_endpoint(xhci,
+ xhci_queue_configure_endpoint(xhci, command,
xhci->devs[slot_id]->in_ctx->dma, slot_id,
false);
xhci_ring_cmd_db(xhci);
@@ -1205,185 +1131,211 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
}
}
-/* Complete the command and detele it from the devcie's command queue.
- */
-static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
- struct xhci_command *command, u32 status)
+static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
+ u32 cmd_comp_code)
{
- command->status = status;
- list_del(&command->cmd_list);
- if (command->completion)
- complete(command->completion);
+ if (cmd_comp_code == COMP_SUCCESS)
+ xhci->slot_id = slot_id;
else
- xhci_free_command(xhci, command);
+ xhci->slot_id = 0;
}
-
-/* Check to see if a command in the device's command queue matches this one.
- * Signal the completion or free the command, and return 1. Return 0 if the
- * completed command isn't at the head of the command list.
- */
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
- struct xhci_virt_device *virt_dev,
- struct xhci_event_cmd *event)
+static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
- struct xhci_command *command;
-
- if (list_empty(&virt_dev->cmd_list))
- return 0;
-
- command = list_entry(virt_dev->cmd_list.next,
- struct xhci_command, cmd_list);
- if (xhci->cmd_ring->dequeue != command->command_trb)
- return 0;
+ struct xhci_virt_device *virt_dev;
- xhci_complete_cmd_in_cmd_wait_list(xhci, command,
- GET_COMP_CODE(le32_to_cpu(event->status)));
- return 1;
+ virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return;
+ if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ /* Delete default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, true);
+ xhci_free_virt_device(xhci, slot_id);
}
-/*
- * Finding the command trb need to be cancelled and modifying it to
- * NO OP command. And if the command is in device's command wait
- * list, finishing and freeing it.
- *
- * If we can't find the command trb, we think it had already been
- * executed.
- */
-static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
+static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
+ struct xhci_event_cmd *event, u32 cmd_comp_code)
{
- struct xhci_segment *cur_seg;
- union xhci_trb *cmd_trb;
- u32 cycle_state;
-
- if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
- return;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ unsigned int ep_index;
+ unsigned int ep_state;
+ u32 add_flags, drop_flags;
- /* find the current segment of command ring */
- cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
- xhci->cmd_ring->dequeue, &cycle_state);
-
- if (!cur_seg) {
- xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
- xhci->cmd_ring->dequeue,
- (unsigned long long)
- xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
- xhci->cmd_ring->dequeue));
- xhci_debug_ring(xhci, xhci->cmd_ring);
- xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+ /*
+ * Configure endpoint commands can come from the USB core
+ * configuration or alt setting changes, or because the HW
+ * needed an extra configure endpoint command after a reset
+ * endpoint command or streams were being configured.
+ * If the command was for a halted endpoint, the xHCI driver
+ * is not waiting on the configure endpoint command.
+ */
+ virt_dev = xhci->devs[slot_id];
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ if (!ctrl_ctx) {
+ xhci_warn(xhci, "Could not get input context, bad type.\n");
return;
}
- /* find the command trb matched by cd from command ring */
- for (cmd_trb = xhci->cmd_ring->dequeue;
- cmd_trb != xhci->cmd_ring->enqueue;
- next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
- /* If the trb is link trb, continue */
- if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
- continue;
-
- if (cur_cd->cmd_trb == cmd_trb) {
+ add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+ drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+ /* Input ctx add_flags are the endpoint index plus one */
+ ep_index = xhci_last_valid_endpoint(add_flags) - 1;
- /* If the command in device's command list, we should
- * finish it and free the command structure.
- */
- if (cur_cd->command)
- xhci_complete_cmd_in_cmd_wait_list(xhci,
- cur_cd->command, COMP_CMD_STOP);
-
- /* get cycle state from the origin command trb */
- cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
- & TRB_CYCLE;
-
- /* modify the command trb to NO OP command */
- cmd_trb->generic.field[0] = 0;
- cmd_trb->generic.field[1] = 0;
- cmd_trb->generic.field[2] = 0;
- cmd_trb->generic.field[3] = cpu_to_le32(
- TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
- break;
- }
+ /* A usb_set_interface() call directly after clearing a halted
+ * condition may race on this quirky hardware. Not worth
+ * worrying about, since this is prototype hardware. Not sure
+ * if this will work for streams, but streams support was
+ * untested on this prototype.
+ */
+ if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
+ ep_index != (unsigned int) -1 &&
+ add_flags - SLOT_FLAG == drop_flags) {
+ ep_state = virt_dev->eps[ep_index].ep_state;
+ if (!(ep_state & EP_HALTED))
+ return;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Completed config ep cmd - "
+ "last ep index = %d, state = %d",
+ ep_index, ep_state);
+ /* Clear internal halted state and restart ring(s) */
+ virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ return;
}
+ return;
}
-static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
+static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
+ struct xhci_event_cmd *event)
{
- struct xhci_cd *cur_cd, *next_cd;
+ xhci_dbg(xhci, "Completed reset device command.\n");
+ if (!xhci->devs[slot_id])
+ xhci_warn(xhci, "Reset device command completion "
+ "for disabled slot %u\n", slot_id);
+}
- if (list_empty(&xhci->cancel_cmd_list))
+static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event)
+{
+ if (!(xhci->quirks & XHCI_NEC_HOST)) {
+ xhci->error_bitmask |= 1 << 6;
return;
-
- list_for_each_entry_safe(cur_cd, next_cd,
- &xhci->cancel_cmd_list, cancel_cmd_list) {
- xhci_cmd_to_noop(xhci, cur_cd);
- list_del(&cur_cd->cancel_cmd_list);
- kfree(cur_cd);
}
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "NEC firmware version %2x.%02x",
+ NEC_FW_MAJOR(le32_to_cpu(event->status)),
+ NEC_FW_MINOR(le32_to_cpu(event->status)));
}
-/*
- * traversing the cancel_cmd_list. If the command descriptor according
- * to cmd_trb is found, the function free it and return 1, otherwise
- * return 0.
- */
-static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
- union xhci_trb *cmd_trb)
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
- struct xhci_cd *cur_cd, *next_cd;
-
- if (list_empty(&xhci->cancel_cmd_list))
- return 0;
+ list_del(&cmd->cmd_list);
- list_for_each_entry_safe(cur_cd, next_cd,
- &xhci->cancel_cmd_list, cancel_cmd_list) {
- if (cur_cd->cmd_trb == cmd_trb) {
- if (cur_cd->command)
- xhci_complete_cmd_in_cmd_wait_list(xhci,
- cur_cd->command, COMP_CMD_STOP);
- list_del(&cur_cd->cancel_cmd_list);
- kfree(cur_cd);
- return 1;
- }
+ if (cmd->completion) {
+ cmd->status = status;
+ complete(cmd->completion);
+ } else {
+ kfree(cmd);
}
+}
- return 0;
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
+{
+ struct xhci_command *cur_cmd, *tmp_cmd;
+ list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
+ xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}
/*
- * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
- * trb pointed by the command ring dequeue pointer is the trb we want to
- * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
- * traverse the cancel_cmd_list to trun the all of the commands according
- * to command descriptor to NO-OP trb.
+ * Turn all commands on command ring with status set to "aborted" to no-op trbs.
+ * If there are other commands waiting then restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
*/
-static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
- int cmd_trb_comp_code)
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ struct xhci_command *cur_cmd)
{
- int cur_trb_is_good = 0;
+ struct xhci_command *i_cmd, *tmp_cmd;
+ u32 cycle_state;
- /* Searching the cmd trb pointed by the command ring dequeue
- * pointer in command descriptor list. If it is found, free it.
- */
- cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
- xhci->cmd_ring->dequeue);
+ /* Turn all aborted commands in list to no-ops, then restart */
+ list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
+ cmd_list) {
- if (cmd_trb_comp_code == COMP_CMD_ABORT)
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- else if (cmd_trb_comp_code == COMP_CMD_STOP) {
- /* traversing the cancel_cmd_list and canceling
- * the command according to command descriptor
- */
- xhci_cancel_cmd_in_cd_list(xhci);
+ if (i_cmd->status != COMP_CMD_ABORT)
+ continue;
+
+ i_cmd->status = COMP_CMD_STOP;
+
+ xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+ i_cmd->command_trb);
+ /* get cycle state from the original cmd trb */
+ cycle_state = le32_to_cpu(
+ i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+ /* modify the command trb to no-op command */
+ i_cmd->command_trb->generic.field[0] = 0;
+ i_cmd->command_trb->generic.field[1] = 0;
+ i_cmd->command_trb->generic.field[2] = 0;
+ i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
- xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
/*
- * ring command ring doorbell again to restart the
- * command ring
+ * caller waiting for completion is called when command
+ * completion event is received for these no-op commands
*/
- if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
- xhci_ring_cmd_db(xhci);
}
- return cur_trb_is_good;
+
+ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+ /* ring command ring doorbell to restart the command ring */
+ if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ xhci->current_cmd = cur_cmd;
+ mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_ring_cmd_db(xhci);
+ }
+ return;
+}
+
+
+void xhci_handle_command_timeout(unsigned long data)
+{
+ struct xhci_hcd *xhci;
+ int ret;
+ unsigned long flags;
+ u64 hw_ring_state;
+ struct xhci_command *cur_cmd = NULL;
+ xhci = (struct xhci_hcd *) data;
+
+ /* mark this command to be cancelled */
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci->current_cmd) {
+ cur_cmd = xhci->current_cmd;
+ cur_cmd->status = COMP_CMD_ABORT;
+ }
+
+
+ /* Make sure command ring is running before aborting it */
+ hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
+ (hw_ring_state & CMD_RING_RUNNING)) {
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "Command timeout\n");
+ ret = xhci_abort_cmd_ring(xhci);
+ if (unlikely(ret == -ESHUTDOWN)) {
+ xhci_err(xhci, "Abort command ring failed\n");
+ xhci_cleanup_command_queue(xhci);
+ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+ xhci_dbg(xhci, "xHCI host controller is dead.\n");
+ }
+ return;
+ }
+ /* command timeout on stopped ring, ring can't be aborted */
+ xhci_dbg(xhci, "Command timeout on stopped ring\n");
+ xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
}
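
xhci_handle_command_timeout() is the new command watchdog: it marks xhci->current_cmd as aborted and, if the command ring is still running, kicks off an abort; otherwise it falls through to the stopped-ring handling. It relies on xhci->cmd_timer being initialised elsewhere (not in this file); a plausible sketch of that setup, under the assumption that it lives with the rest of the xHCI init code:

	/* Sketch (assumed init code): arm the command watchdog so a hung
	 * command eventually lands in xhci_handle_command_timeout().
	 */
	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
		    (unsigned long)xhci);

The timer itself is (re)armed with mod_timer(..., jiffies + XHCI_CMD_DEFAULT_TIMEOUT) from queue_command() and handle_cmd_completion() below.
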
static void handle_cmd_completion(struct xhci_hcd *xhci,
@@ -1392,15 +1344,15 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
- struct xhci_input_control_ctx *ctrl_ctx;
- struct xhci_virt_device *virt_dev;
- unsigned int ep_index;
- struct xhci_ring *ep_ring;
- unsigned int ep_state;
+ u32 cmd_comp_code;
+ union xhci_trb *cmd_trb;
+ struct xhci_command *cmd;
+ u32 cmd_type;
cmd_dma = le64_to_cpu(event->cmd_trb);
+ cmd_trb = xhci->cmd_ring->dequeue;
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
- xhci->cmd_ring->dequeue);
+ cmd_trb);
/* Is the command ring deq ptr out of sync with the deq seg ptr? */
if (cmd_dequeue_dma == 0) {
xhci->error_bitmask |= 1 << 4;
@@ -1412,147 +1364,101 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
return;
}
- trace_xhci_cmd_completion(&xhci->cmd_ring->dequeue->generic,
- (struct xhci_generic_trb *) event);
+ cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
- if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
- (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
- /* If the return value is 0, we think the trb pointed by
- * command ring dequeue pointer is a good trb. The good
- * trb means we don't want to cancel the trb, but it have
- * been stopped by host. So we should handle it normally.
- * Otherwise, driver should invoke inc_deq() and return.
- */
- if (handle_stopped_cmd_ring(xhci,
- GET_COMP_CODE(le32_to_cpu(event->status)))) {
- inc_deq(xhci, xhci->cmd_ring);
- return;
- }
- /* There is no command to handle if we get a stop event when the
- * command ring is empty, event->cmd_trb points to the next
- * unset command
- */
- if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
- return;
+ if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+ xhci_err(xhci,
+ "Command completion event does not match command\n");
+ return;
}
- switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
- & TRB_TYPE_BITMASK) {
- case TRB_TYPE(TRB_ENABLE_SLOT):
- if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
- xhci->slot_id = slot_id;
- else
- xhci->slot_id = 0;
- complete(&xhci->addr_dev);
+ del_timer(&xhci->cmd_timer);
+
+ trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
+
+ cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
+
+ /* If CMD ring stopped we own the trbs between enqueue and dequeue */
+ if (cmd_comp_code == COMP_CMD_STOP) {
+ xhci_handle_stopped_cmd_ring(xhci, cmd);
+ return;
+ }
+ /*
+ * Host aborted the command ring, check if the current command was
+ * supposed to be aborted, otherwise continue normally.
+ * The command ring is stopped now, but the xHC will issue a Command
+ * Ring Stopped event which will cause us to restart it.
+ */
+ if (cmd_comp_code == COMP_CMD_ABORT) {
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+ if (cmd->status == COMP_CMD_ABORT)
+ goto event_handled;
+ }
+
+ cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
+ switch (cmd_type) {
+ case TRB_ENABLE_SLOT:
+ xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
break;
- case TRB_TYPE(TRB_DISABLE_SLOT):
- if (xhci->devs[slot_id]) {
- if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
- /* Delete default control endpoint resources */
- xhci_free_device_endpoint_resources(xhci,
- xhci->devs[slot_id], true);
- xhci_free_virt_device(xhci, slot_id);
- }
+ case TRB_DISABLE_SLOT:
+ xhci_handle_cmd_disable_slot(xhci, slot_id);
break;
- case TRB_TYPE(TRB_CONFIG_EP):
- virt_dev = xhci->devs[slot_id];
- if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
- break;
- /*
- * Configure endpoint commands can come from the USB core
- * configuration or alt setting changes, or because the HW
- * needed an extra configure endpoint command after a reset
- * endpoint command or streams were being configured.
- * If the command was for a halted endpoint, the xHCI driver
- * is not waiting on the configure endpoint command.
- */
- ctrl_ctx = xhci_get_input_control_ctx(xhci,
- virt_dev->in_ctx);
- if (!ctrl_ctx) {
- xhci_warn(xhci, "Could not get input context, bad type.\n");
- break;
- }
- /* Input ctx add_flags are the endpoint index plus one */
- ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
- /* A usb_set_interface() call directly after clearing a halted
- * condition may race on this quirky hardware. Not worth
- * worrying about, since this is prototype hardware. Not sure
- * if this will work for streams, but streams support was
- * untested on this prototype.
- */
- if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
- ep_index != (unsigned int) -1 &&
- le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
- le32_to_cpu(ctrl_ctx->drop_flags)) {
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
- ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
- if (!(ep_state & EP_HALTED))
- goto bandwidth_change;
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "Completed config ep cmd - "
- "last ep index = %d, state = %d",
- ep_index, ep_state);
- /* Clear internal halted state and restart ring(s) */
- xhci->devs[slot_id]->eps[ep_index].ep_state &=
- ~EP_HALTED;
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
- break;
- }
-bandwidth_change:
- xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
- "Completed config ep cmd");
- xhci->devs[slot_id]->cmd_status =
- GET_COMP_CODE(le32_to_cpu(event->status));
- complete(&xhci->devs[slot_id]->cmd_completion);
+ case TRB_CONFIG_EP:
+ if (!cmd->completion)
+ xhci_handle_cmd_config_ep(xhci, slot_id, event,
+ cmd_comp_code);
break;
- case TRB_TYPE(TRB_EVAL_CONTEXT):
- virt_dev = xhci->devs[slot_id];
- if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
- break;
- xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
- complete(&xhci->devs[slot_id]->cmd_completion);
+ case TRB_EVAL_CONTEXT:
break;
- case TRB_TYPE(TRB_ADDR_DEV):
- xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
- complete(&xhci->addr_dev);
+ case TRB_ADDR_DEV:
break;
- case TRB_TYPE(TRB_STOP_RING):
- handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
+ case TRB_STOP_RING:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
break;
- case TRB_TYPE(TRB_SET_DEQ):
- handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+ case TRB_SET_DEQ:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
- case TRB_TYPE(TRB_CMD_NOOP):
+ case TRB_CMD_NOOP:
+ /* Is this an aborted command turned to NO-OP? */
+ if (cmd->status == COMP_CMD_STOP)
+ cmd_comp_code = COMP_CMD_STOP;
break;
- case TRB_TYPE(TRB_RESET_EP):
- handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
+ case TRB_RESET_EP:
+ WARN_ON(slot_id != TRB_TO_SLOT_ID(
+ le32_to_cpu(cmd_trb->generic.field[3])));
+ xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
break;
- case TRB_TYPE(TRB_RESET_DEV):
- xhci_dbg(xhci, "Completed reset device command.\n");
+ case TRB_RESET_DEV:
+ /* SLOT_ID field in reset device cmd completion event TRB is 0.
+ * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
+ */
slot_id = TRB_TO_SLOT_ID(
- le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
- virt_dev = xhci->devs[slot_id];
- if (virt_dev)
- handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
- else
- xhci_warn(xhci, "Reset device command completion "
- "for disabled slot %u\n", slot_id);
+ le32_to_cpu(cmd_trb->generic.field[3]));
+ xhci_handle_cmd_reset_dev(xhci, slot_id, event);
break;
- case TRB_TYPE(TRB_NEC_GET_FW):
- if (!(xhci->quirks & XHCI_NEC_HOST)) {
- xhci->error_bitmask |= 1 << 6;
- break;
- }
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "NEC firmware version %2x.%02x",
- NEC_FW_MAJOR(le32_to_cpu(event->status)),
- NEC_FW_MINOR(le32_to_cpu(event->status)));
+ case TRB_NEC_GET_FW:
+ xhci_handle_cmd_nec_get_fw(xhci, event);
break;
default:
/* Skip over unknown commands on the event ring */
xhci->error_bitmask |= 1 << 6;
break;
}
+
+ /* restart timer if this wasn't the last command */
+ if (cmd->cmd_list.next != &xhci->cmd_list) {
+ xhci->current_cmd = list_entry(cmd->cmd_list.next,
+ struct xhci_command, cmd_list);
+ mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ }
+
+event_handled:
+ xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+
inc_deq(xhci, xhci->cmd_ring);
}
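
The tail of handle_cmd_completion() re-arms the watchdog only when the command just handled is not the last one on xhci->cmd_list. The open-coded test cmd->cmd_list.next != &xhci->cmd_list is equivalent to the list helper shown in this sketch:

	/* Sketch only: same check, spelled with list_is_last(). */
	if (!list_is_last(&cmd->cmd_list, &xhci->cmd_list)) {
		xhci->current_cmd = list_entry(cmd->cmd_list.next,
				struct xhci_command, cmd_list);
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}
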
@@ -1611,7 +1517,7 @@ static void handle_device_notification(struct xhci_hcd *xhci,
u32 slot_id;
struct usb_device *udev;
- slot_id = TRB_TO_SLOT_ID(event->generic.field[3]);
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
if (!xhci->devs[slot_id]) {
xhci_warn(xhci, "Device Notification event for "
"unused slot %u\n", slot_id);
@@ -1695,7 +1601,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
port_id);
- temp = xhci_readl(xhci, port_array[faked_port_index]);
+ temp = readl(port_array[faked_port_index]);
if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
usb_hcd_resume_root_hub(hcd);
@@ -1704,7 +1610,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
- temp1 = xhci_readl(xhci, &xhci->op_regs->command);
+ temp1 = readl(&xhci->op_regs->command);
if (!(temp1 & CMD_RUN)) {
xhci_warn(xhci, "xHC is not running.\n");
goto cleanup;
@@ -1868,16 +1774,19 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
struct xhci_td *td, union xhci_trb *event_trb)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ struct xhci_command *command;
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ if (!command)
+ return;
+
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
ep->stopped_stream = stream_id;
- xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
ep->stopped_stream = 0;
xhci_ring_cmd_db(xhci);
@@ -1959,7 +1868,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* the ring dequeue pointer or take this TD off any lists yet.
*/
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
return 0;
} else {
if (trb_comp_code == COMP_STALL) {
@@ -1971,7 +1879,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* USB class driver clear the stall later.
*/
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
ep->stopped_stream = ep_ring->stream_id;
} else if (xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code)) {
@@ -2588,7 +2495,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* successful event after a short transfer.
* Ignore it.
*/
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
ret = 0;
@@ -2787,7 +2694,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
spin_lock(&xhci->lock);
/* Check if the xHC generated the interrupt, or the irq is shared */
- status = xhci_readl(xhci, &xhci->op_regs->status);
+ status = readl(&xhci->op_regs->status);
if (status == 0xffffffff)
goto hw_died;
@@ -2809,16 +2716,16 @@ hw_died:
* Write 1 to clear the interrupt status.
*/
status |= STS_EINT;
- xhci_writel(xhci, status, &xhci->op_regs->status);
+ writel(status, &xhci->op_regs->status);
/* FIXME when MSI-X is supported and there are multiple vectors */
/* Clear the MSI-X event interrupt status */
if (hcd->irq) {
u32 irq_pending;
/* Acknowledge the PCI interrupt */
- irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ irq_pending = readl(&xhci->ir_set->irq_pending);
irq_pending |= IMAN_IP;
- xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
+ writel(irq_pending, &xhci->ir_set->irq_pending);
}
if (xhci->xhc_state & XHCI_STATE_DYING) {
@@ -3630,7 +3537,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
return 0;
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
- return roundup(total_packet_count, max_burst + 1) - 1;
+ return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
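
The change from roundup() to DIV_ROUND_UP() matters because xhci_get_burst_count() must return the zero-based number of bursts, not a packet count rounded up to a burst boundary. A worked example with hypothetical values, 11 packets and bMaxBurst = 3 (4 packets per burst):

	/* roundup(11, 4) - 1      = 12 - 1 = 11   (wrong: still a packet count)
	 * DIV_ROUND_UP(11, 4) - 1 =  3 - 1 =  2   (right: 3 bursts, zero-based)
	 */
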
/*
@@ -3887,7 +3794,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
if (ret)
return ret;
- start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+ start_frame = readl(&xhci->run_regs->microframe_index);
start_frame &= 0x3fff;
urb->start_frame = start_frame;
@@ -3930,11 +3837,14 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
* because the command event handler may want to resubmit a failed command.
*/
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
- u32 field3, u32 field4, bool command_must_succeed)
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 field1, u32 field2,
+ u32 field3, u32 field4, bool command_must_succeed)
{
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ESHUTDOWN;
if (!command_must_succeed)
reserved_trbs++;
@@ -3948,57 +3858,71 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
"unfailable commands failed.\n");
return ret;
}
+
+ cmd->command_trb = xhci->cmd_ring->enqueue;
+ list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
+
+ /* if there are no other commands queued we start the timeout timer */
+ if (xhci->cmd_list.next == &cmd->cmd_list &&
+ !timer_pending(&xhci->cmd_timer)) {
+ xhci->current_cmd = cmd;
+ mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ }
+
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
field4 | xhci->cmd_ring->cycle_state);
return 0;
}
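
With this rework every command carries a struct xhci_command: queue_command() records the enqueue position in cmd->command_trb, links the command onto xhci->cmd_list and, for the first pending command, arms the watchdog. Callers therefore follow an allocate/queue/ring/wait pattern; a hedged sketch of that flow (the helper below is illustrative, not part of the patch, and error handling is trimmed):

	static int example_enable_slot(struct xhci_hcd *xhci)
	{
		struct xhci_command *cmd;
		unsigned long flags;
		int ret;

		/* third argument asks xhci_alloc_command() for a completion */
		cmd = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
		if (!cmd)
			return -ENOMEM;

		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_slot_control(xhci, cmd, TRB_ENABLE_SLOT, 0);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_free_command(xhci, cmd);
			return ret;
		}
		xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);

		/* signalled from handle_cmd_completion() via
		 * xhci_complete_del_and_free_cmd()
		 */
		wait_for_completion(cmd->completion);
		ret = cmd->status;		/* xHC completion code */
		xhci_free_command(xhci, cmd);
		return ret;
	}
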
/* Queue a slot enable or disable request on the command ring */
-int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 trb_type, u32 slot_id)
{
- return queue_command(xhci, 0, 0, 0,
+ return queue_command(xhci, cmd, 0, 0, 0,
TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}
/* Queue an address device command TRB */
-int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id)
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
- return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
- TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
- false);
+ TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
+ | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}
-int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 field1, u32 field2, u32 field3, u32 field4)
{
- return queue_command(xhci, field1, field2, field3, field4, false);
+ return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}
/* Queue a reset device command TRB */
-int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 slot_id)
{
- return queue_command(xhci, 0, 0, 0,
+ return queue_command(xhci, cmd, 0, 0, 0,
TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
false);
}
/* Queue a configure endpoint command TRB */
-int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed)
{
- return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
command_must_succeed);
}
/* Queue an evaluate context command TRB */
-int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
- u32 slot_id, bool command_must_succeed)
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
- return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
command_must_succeed);
@@ -4008,30 +3932,32 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
* Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
* activity on an endpoint that is about to be suspended.
*/
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, int suspend)
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index, int suspend)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_STOP_RING);
u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
- return queue_command(xhci, 0, 0, 0,
+ return queue_command(xhci, cmd, 0, 0, 0,
trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
/* Set Transfer Ring Dequeue Pointer command.
* This should not be used for endpoints that have streams enabled.
*/
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, unsigned int stream_id,
- struct xhci_segment *deq_seg,
- union xhci_trb *deq_ptr, u32 cycle_state)
+static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_segment *deq_seg,
+ union xhci_trb *deq_ptr, u32 cycle_state)
{
dma_addr_t addr;
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
+ u32 trb_sct = 0;
u32 type = TRB_TYPE(TRB_SET_DEQ);
struct xhci_virt_ep *ep;
@@ -4050,18 +3976,21 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
}
ep->queued_deq_seg = deq_seg;
ep->queued_deq_ptr = deq_ptr;
- return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+ if (stream_id)
+ trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+ return queue_command(xhci, cmd,
+ lower_32_bits(addr) | trb_sct | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index)
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ int slot_id, unsigned int ep_index)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_RESET_EP);
- return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
- false);
+ return queue_command(xhci, cmd, 0, 0, 0,
+ trb_slot_id | trb_ep_index | type, false);
}