author | Greg Kroah-Hartman <gregkh@suse.de> | 2011-05-02 17:05:19 -0700
---|---|---
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2011-05-02 17:05:23 -0700
commit | dbc265465a3fc8ac8d75d3ede7e84ea122a8fd0a (patch) |
tree | 59e1ebdf34f49c48a5603ca541508492672eaecd /drivers/usb/host |
parent | 71a9f9d268a5c2b0a80ae606cf8e502f3410a5df (diff) |
parent | b61d378f2da41c748aba6ca19d77e1e1c02bcea5 (diff) |
Merge branch 'for-usb-next' of git+ssh://master.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-next
* 'for-usb-next' of git+ssh://master.kernel.org/pub/scm/linux/kernel/git/sarah/xhci:
xhci 1.0: Set transfer burst last packet count field.
xhci 1.0: Set transfer burst count field.
xhci 1.0: Update TD size field format.
xhci 1.0: Only interrupt on short packet for IN EPs.
xhci: Remove sparse warning about cmd_status.
usbcore: warm reset USB3 port in SS.Inactive state
usbcore: Refine USB3.0 device suspend and resume
xHCI: report USB3.0 portstatus comply with USB3.0 specification
xHCI: Set link state support
xHCI: Clear link state change support
xHCI: warm reset support
usb/ch9: use proper endianess for wBytesPerInterval
xhci: Remove recursive call to xhci_handle_event
xhci: Add an assertion to check for virt_dev=0 bug.
xhci: Add rmb() between reading event validity & event data access.
xhci: Make xHCI driver endian-safe
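
The last item above, the endian-safety conversion, accounts for most of the diff below: TRB, ring, and context fields become explicit `__le32`/`__le64` types, MMIO register pointers become `__le32 __iomem *`, and every CPU-side access goes through `le32_to_cpu()`/`cpu_to_le32()` so the driver behaves the same on big-endian hosts. As a rough illustration of that pattern (the struct and helper names here are simplified stand-ins, not the driver's actual definitions), a read and a read-modify-write of a TRB control word look like this:

```c
/*
 * Minimal sketch of the endian-safe access pattern used throughout the
 * series: descriptor fields live in memory as little-endian, and the CPU
 * converts on every access. Names below are illustrative only.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_link_trb {
	__le64 segment_ptr;	/* DMA address of the next ring segment */
	__le32 intr_target;
	__le32 control;		/* TRB type, cycle, toggle, chain bits */
};

#define DEMO_TRB_CYCLE	(1 << 0)
#define DEMO_TRB_CHAIN	(1 << 4)

/* Read a single bit from a little-endian field on any host endianness. */
static inline bool demo_trb_cycle(const struct demo_link_trb *trb)
{
	return le32_to_cpu(trb->control) & DEMO_TRB_CYCLE;
}

/* Read-modify-write: convert to CPU order, update, convert back. */
static inline void demo_trb_set_chain(struct demo_link_trb *trb, bool chain)
{
	u32 ctrl = le32_to_cpu(trb->control);

	if (chain)
		ctrl |= DEMO_TRB_CHAIN;
	else
		ctrl &= ~DEMO_TRB_CHAIN;
	trb->control = cpu_to_le32(ctrl);
}
```

The same conversion explains the recurring hunks in the diff that replace direct assignments such as `entry->seg_addr = seg->dma;` with `entry->seg_addr = cpu_to_le64(seg->dma);`, and that change register-pointer declarations from `u32 __iomem *` to `__le32 __iomem *`.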
Diffstat (limited to 'drivers/usb/host')
-rw-r--r-- | drivers/usb/host/xhci-dbg.c | 51
-rw-r--r-- | drivers/usb/host/xhci-hub.c | 165
-rw-r--r-- | drivers/usb/host/xhci-mem.c | 124
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 477
-rw-r--r-- | drivers/usb/host/xhci.c | 124
-rw-r--r-- | drivers/usb/host/xhci.h | 136
6 files changed, 658 insertions(+), 419 deletions(-)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 0231814a97a..2e0486178db 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -147,7 +147,7 @@ static void xhci_print_op_regs(struct xhci_hcd *xhci) static void xhci_print_ports(struct xhci_hcd *xhci) { - u32 __iomem *addr; + __le32 __iomem *addr; int i, j; int ports; char *names[NUM_PORT_REGS] = { @@ -253,27 +253,27 @@ void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb) void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) { u64 address; - u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK; + u32 type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK; switch (type) { case TRB_TYPE(TRB_LINK): xhci_dbg(xhci, "Link TRB:\n"); xhci_print_trb_offsets(xhci, trb); - address = trb->link.segment_ptr; + address = le64_to_cpu(trb->link.segment_ptr); xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); xhci_dbg(xhci, "Interrupter target = 0x%x\n", - GET_INTR_TARGET(trb->link.intr_target)); + GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target))); xhci_dbg(xhci, "Cycle bit = %u\n", - (unsigned int) (trb->link.control & TRB_CYCLE)); + (unsigned int) (le32_to_cpu(trb->link.control) & TRB_CYCLE)); xhci_dbg(xhci, "Toggle cycle bit = %u\n", - (unsigned int) (trb->link.control & LINK_TOGGLE)); + (unsigned int) (le32_to_cpu(trb->link.control) & LINK_TOGGLE)); xhci_dbg(xhci, "No Snoop bit = %u\n", - (unsigned int) (trb->link.control & TRB_NO_SNOOP)); + (unsigned int) (le32_to_cpu(trb->link.control) & TRB_NO_SNOOP)); break; case TRB_TYPE(TRB_TRANSFER): - address = trb->trans_event.buffer; + address = le64_to_cpu(trb->trans_event.buffer); /* * FIXME: look at flags to figure out if it's an address or if * the data is directly in the buffer field. 
@@ -281,11 +281,12 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); break; case TRB_TYPE(TRB_COMPLETION): - address = trb->event_cmd.cmd_trb; + address = le64_to_cpu(trb->event_cmd.cmd_trb); xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); xhci_dbg(xhci, "Completion status = %u\n", - (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); - xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags); + (unsigned int) GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status))); + xhci_dbg(xhci, "Flags = 0x%x\n", + (unsigned int) le32_to_cpu(trb->event_cmd.flags)); break; default: xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n", @@ -311,16 +312,16 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg) { int i; - u32 addr = (u32) seg->dma; + u64 addr = seg->dma; union xhci_trb *trb = seg->trbs; for (i = 0; i < TRBS_PER_SEGMENT; ++i) { trb = &seg->trbs[i]; - xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr, - lower_32_bits(trb->link.segment_ptr), - upper_32_bits(trb->link.segment_ptr), - (unsigned int) trb->link.intr_target, - (unsigned int) trb->link.control); + xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr, + (u32)lower_32_bits(le64_to_cpu(trb->link.segment_ptr)), + (u32)upper_32_bits(le64_to_cpu(trb->link.segment_ptr)), + (unsigned int) le32_to_cpu(trb->link.intr_target), + (unsigned int) le32_to_cpu(trb->link.control)); addr += sizeof(*trb); } } @@ -391,18 +392,18 @@ void xhci_dbg_ep_rings(struct xhci_hcd *xhci, void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) { - u32 addr = (u32) erst->erst_dma_addr; + u64 addr = erst->erst_dma_addr; int i; struct xhci_erst_entry *entry; for (i = 0; i < erst->num_entries; ++i) { entry = &erst->entries[i]; - xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", - (unsigned int) addr, - lower_32_bits(entry->seg_addr), - upper_32_bits(entry->seg_addr), - (unsigned int) entry->seg_size, - (unsigned int) entry->rsvd); + xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", + addr, + lower_32_bits(le64_to_cpu(entry->seg_addr)), + upper_32_bits(le64_to_cpu(entry->seg_addr)), + (unsigned int) le32_to_cpu(entry->seg_size), + (unsigned int) le32_to_cpu(entry->rsvd)); addr += sizeof(*entry); } } @@ -436,7 +437,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci, { struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); - switch (GET_SLOT_STATE(slot_ctx->dev_state)) { + switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) { case 0: return "enabled/disabled"; case 1: diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a78f2ebd11b..e3ddc6a95af 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -50,7 +50,7 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci, temp |= 0x0008; /* Bits 6:5 - no TTs in root ports */ /* Bit 7 - no port indicators */ - desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp); + desc->wHubCharacteristics = cpu_to_le16(temp); } /* Fill in the USB 2.0 roothub descriptor */ @@ -314,7 +314,7 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id) } static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, - u16 wIndex, u32 __iomem *addr, u32 port_status) + u16 wIndex, __le32 __iomem *addr, u32 port_status) { /* Don't allow the USB core to disable SuperSpeed ports. 
*/ if (hcd->speed == HCD_USB3) { @@ -331,7 +331,7 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, } static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, - u16 wIndex, u32 __iomem *addr, u32 port_status) + u16 wIndex, __le32 __iomem *addr, u32 port_status) { char *port_change_bit; u32 status; @@ -341,6 +341,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, status = PORT_RC; port_change_bit = "reset"; break; + case USB_PORT_FEAT_C_BH_PORT_RESET: + status = PORT_WRC; + port_change_bit = "warm(BH) reset"; + break; case USB_PORT_FEAT_C_CONNECTION: status = PORT_CSC; port_change_bit = "connect"; @@ -357,6 +361,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, status = PORT_PLC; port_change_bit = "suspend/resume"; break; + case USB_PORT_FEAT_C_PORT_LINK_STATE: + status = PORT_PLC; + port_change_bit = "link state"; + break; default: /* Should never happen */ return; @@ -376,9 +384,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, unsigned long flags; u32 temp, temp1, status; int retval = 0; - u32 __iomem **port_array; + __le32 __iomem **port_array; int slot_id; struct xhci_bus_state *bus_state; + u16 link_state = 0; if (hcd->speed == HCD_USB3) { ports = xhci->num_usb3_ports; @@ -422,9 +431,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp); - /* FIXME - should we return a port status value like the USB - * 3.0 external hubs do? - */ /* wPortChange bits */ if (temp & PORT_CSC) status |= USB_PORT_STAT_C_CONNECTION << 16; @@ -432,13 +438,21 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, status |= USB_PORT_STAT_C_ENABLE << 16; if ((temp & PORT_OCC)) status |= USB_PORT_STAT_C_OVERCURRENT << 16; - /* - * FIXME ignoring reset and USB 2.1/3.0 specific - * changes - */ - if ((temp & PORT_PLS_MASK) == XDEV_U3 - && (temp & PORT_POWER)) - status |= 1 << USB_PORT_FEAT_SUSPEND; + if ((temp & PORT_RC)) + status |= USB_PORT_STAT_C_RESET << 16; + /* USB3.0 only */ + if (hcd->speed == HCD_USB3) { + if ((temp & PORT_PLC)) + status |= USB_PORT_STAT_C_LINK_STATE << 16; + if ((temp & PORT_WRC)) + status |= USB_PORT_STAT_C_BH_RESET << 16; + } + + if (hcd->speed != HCD_USB3) { + if ((temp & PORT_PLS_MASK) == XDEV_U3 + && (temp & PORT_POWER)) + status |= USB_PORT_STAT_SUSPEND; + } if ((temp & PORT_PLS_MASK) == XDEV_RESUME) { if ((temp & PORT_RESET) || !(temp & PORT_PE)) goto error; @@ -469,7 +483,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, && (temp & PORT_POWER) && (bus_state->suspended_ports & (1 << wIndex))) { bus_state->suspended_ports &= ~(1 << wIndex); - bus_state->port_c_suspend |= 1 << wIndex; + if (hcd->speed != HCD_USB3) + bus_state->port_c_suspend |= 1 << wIndex; } if (temp & PORT_CONNECT) { status |= USB_PORT_STAT_CONNECTION; @@ -481,14 +496,28 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, status |= USB_PORT_STAT_OVERCURRENT; if (temp & PORT_RESET) status |= USB_PORT_STAT_RESET; - if (temp & PORT_POWER) - status |= USB_PORT_STAT_POWER; + if (temp & PORT_POWER) { + if (hcd->speed == HCD_USB3) + status |= USB_SS_PORT_STAT_POWER; + else + status |= USB_PORT_STAT_POWER; + } + /* Port Link State */ + if (hcd->speed == HCD_USB3) { + /* resume state is a xHCI internal state. + * Do not report it to usb core. 
+ */ + if ((temp & PORT_PLS_MASK) != XDEV_RESUME) + status |= (temp & PORT_PLS_MASK); + } if (bus_state->port_c_suspend & (1 << wIndex)) status |= 1 << USB_PORT_FEAT_C_SUSPEND; xhci_dbg(xhci, "Get port status returned 0x%x\n", status); put_unaligned(cpu_to_le32(status), (__le32 *) buf); break; case SetPortFeature: + if (wValue == USB_PORT_FEAT_LINK_STATE) + link_state = (wIndex & 0xff00) >> 3; wIndex &= 0xff; if (!wIndex || wIndex > ports) goto error; @@ -537,6 +566,44 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, temp = xhci_readl(xhci, port_array[wIndex]); bus_state->suspended_ports |= 1 << wIndex; break; + case USB_PORT_FEAT_LINK_STATE: + temp = xhci_readl(xhci, port_array[wIndex]); + /* Software should not attempt to set + * port link state above '5' (Rx.Detect) and the port + * must be enabled. + */ + if ((temp & PORT_PE) == 0 || + (link_state > USB_SS_PORT_LS_RX_DETECT)) { + xhci_warn(xhci, "Cannot set link state.\n"); + goto error; + } + + if (link_state == USB_SS_PORT_LS_U3) { + slot_id = xhci_find_slot_id_by_port(hcd, xhci, + wIndex + 1); + if (slot_id) { + /* unlock to execute stop endpoint + * commands */ + spin_unlock_irqrestore(&xhci->lock, + flags); + xhci_stop_device(xhci, slot_id, 1); + spin_lock_irqsave(&xhci->lock, flags); + } + } + + temp = xhci_port_state_to_neutral(temp); + temp &= ~PORT_PLS_MASK; + temp |= PORT_LINK_STROBE | link_state; + xhci_writel(xhci, temp, port_array[wIndex]); + + spin_unlock_irqrestore(&xhci->lock, flags); + msleep(20); /* wait device to enter */ + spin_lock_irqsave(&xhci->lock, flags); + + temp = xhci_readl(xhci, port_array[wIndex]); + if (link_state == USB_SS_PORT_LS_U3) + bus_state->suspended_ports |= 1 << wIndex; + break; case USB_PORT_FEAT_POWER: /* * Turn on ports, even if there isn't per-port switching. 
@@ -557,6 +624,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, temp = xhci_readl(xhci, port_array[wIndex]); xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp); break; + case USB_PORT_FEAT_BH_PORT_RESET: + temp |= PORT_WR; + xhci_writel(xhci, temp, port_array[wIndex]); + + temp = xhci_readl(xhci, port_array[wIndex]); + break; default: goto error; } @@ -584,35 +657,27 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, if (temp & XDEV_U3) { if ((temp & PORT_PE) == 0) goto error; - if (DEV_SUPERSPEED(temp)) { - temp = xhci_port_state_to_neutral(temp); - temp &= ~PORT_PLS_MASK; - temp |= PORT_LINK_STROBE | XDEV_U0; - xhci_writel(xhci, temp, - port_array[wIndex]); - xhci_readl(xhci, port_array[wIndex]); - } else { - temp = xhci_port_state_to_neutral(temp); - temp &= ~PORT_PLS_MASK; - temp |= PORT_LINK_STROBE | XDEV_RESUME; - xhci_writel(xhci, temp, - port_array[wIndex]); - spin_unlock_irqrestore(&xhci->lock, - flags); - msleep(20); - spin_lock_irqsave(&xhci->lock, flags); + temp = xhci_port_state_to_neutral(temp); + temp &= ~PORT_PLS_MASK; + temp |= PORT_LINK_STROBE | XDEV_RESUME; + xhci_writel(xhci, temp, + port_array[wIndex]); - temp = xhci_readl(xhci, - port_array[wIndex]); - temp = xhci_port_state_to_neutral(temp); - temp &= ~PORT_PLS_MASK; - temp |= PORT_LINK_STROBE | XDEV_U0; - xhci_writel(xhci, temp, - port_array[wIndex]); - } - bus_state->port_c_suspend |= 1 << wIndex; + spin_unlock_irqrestore(&xhci->lock, + flags); + msleep(20); + spin_lock_irqsave(&xhci->lock, flags); + + temp = xhci_readl(xhci, + port_array[wIndex]); + temp = xhci_port_state_to_neutral(temp); + temp &= ~PORT_PLS_MASK; + temp |= PORT_LINK_STROBE | XDEV_U0; + xhci_writel(xhci, temp, + port_array[wIndex]); } + bus_state->port_c_suspend |= 1 << wIndex; slot_id = xhci_find_slot_id_by_port(hcd, xhci, wIndex + 1); @@ -625,9 +690,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_C_SUSPEND: bus_state->port_c_suspend &= ~(1 << wIndex); case USB_PORT_FEAT_C_RESET: + case USB_PORT_FEAT_C_BH_PORT_RESET: case USB_PORT_FEAT_C_CONNECTION: case USB_PORT_FEAT_C_OVER_CURRENT: case USB_PORT_FEAT_C_ENABLE: + case USB_PORT_FEAT_C_PORT_LINK_STATE: xhci_clear_port_change_bit(xhci, wValue, wIndex, port_array[wIndex], temp); break; @@ -664,7 +731,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) int i, retval; struct xhci_hcd *xhci = hcd_to_xhci(hcd); int ports; - u32 __iomem **port_array; + __le32 __iomem **port_array; struct xhci_bus_state *bus_state; if (hcd->speed == HCD_USB3) { @@ -681,7 +748,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) memset(buf, 0, retval); status = 0; - mask = PORT_CSC | PORT_PEC | PORT_OCC; + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; spin_lock_irqsave(&xhci->lock, flags); /* For each port, did anything change? If so, set that bit in buf. 
*/ @@ -709,7 +776,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); int max_ports, port_index; - u32 __iomem **port_array; + __le32 __iomem **port_array; struct xhci_bus_state *bus_state; unsigned long flags; @@ -779,7 +846,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) if (DEV_HIGHSPEED(t1)) { /* enable remote wake up for USB 2.0 */ - u32 __iomem *addr; + __le32 __iomem *addr; u32 tmp; /* Add one to the port status register address to get @@ -801,7 +868,7 @@ int xhci_bus_resume(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); int max_ports, port_index; - u32 __iomem **port_array; + __le32 __iomem **port_array; struct xhci_bus_state *bus_state; u32 temp; unsigned long flags; @@ -875,7 +942,7 @@ int xhci_bus_resume(struct usb_hcd *hcd) if (DEV_HIGHSPEED(temp)) { /* disable remote wake up for USB 2.0 */ - u32 __iomem *addr; + __le32 __iomem *addr; u32 tmp; /* Add one to the port status register address to get diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 627f3438028..a4fc4d92938 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -89,16 +89,17 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, return; prev->next = next; if (link_trbs) { - prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; + prev->trbs[TRBS_PER_SEGMENT-1].link. + segment_ptr = cpu_to_le64(next->dma); /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ - val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; + val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); val &= ~TRB_TYPE_BITMASK; val |= TRB_TYPE(TRB_LINK); /* Always set the chain bit with 0.95 hardware */ if (xhci_link_trb_quirk(xhci)) val |= TRB_CHAIN; - prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; + prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); } xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", (unsigned long long)prev->dma, @@ -186,7 +187,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, if (link_trbs) { /* See section 4.9.2.1 and 6.4.4.1 */ - prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE); + prev->trbs[TRBS_PER_SEGMENT-1].link. + control |= cpu_to_le32(LINK_TOGGLE); xhci_dbg(xhci, "Wrote link toggle flag to" " segment %p (virtual), 0x%llx (DMA)\n", prev, (unsigned long long)prev->dma); @@ -548,7 +550,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) | cur_ring->cycle_state; - stream_info->stream_ctx_array[cur_stream].stream_ring = addr; + stream_info->stream_ctx_array[cur_stream]. 
+ stream_ring = cpu_to_le64(addr); xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, (unsigned long long) addr); @@ -614,10 +617,10 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n", 1 << (max_primary_streams + 1)); - ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK; - ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams); - ep_ctx->ep_info |= EP_HAS_LSA; - ep_ctx->deq = stream_info->ctx_array_dma; + ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); + ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) + | EP_HAS_LSA); + ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); } /* @@ -630,10 +633,9 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) { dma_addr_t addr; - ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK; - ep_ctx->ep_info &= ~EP_HAS_LSA; + ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); - ep_ctx->deq = addr | ep->ring->cycle_state; + ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); } /* Frees all stream contexts associated with the endpoint, @@ -781,11 +783,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, dev->udev = udev; /* Point to output device context in dcbaa. */ - xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; + xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma); xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", - slot_id, - &xhci->dcbaa->dev_context_ptrs[slot_id], - (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); + slot_id, + &xhci->dcbaa->dev_context_ptrs[slot_id], + (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); return 1; fail: @@ -810,8 +812,9 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, * configured device has reset, so all control transfers should have * been completed or cancelled before the reset. 
*/ - ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue); - ep0_ctx->deq |= ep_ring->cycle_state; + ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg, + ep_ring->enqueue) + | ep_ring->cycle_state); } /* @@ -885,24 +888,22 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); /* 2) New slot context and endpoint 0 context are valid*/ - ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; + ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); /* 3) Only the control endpoint is valid - one endpoint context */ - slot_ctx->dev_info |= LAST_CTX(1); - - slot_ctx->dev_info |= (u32) udev->route; + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route); switch (udev->speed) { case USB_SPEED_SUPER: - slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; + slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS); break; case USB_SPEED_HIGH: - slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; + slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS); break; case USB_SPEED_FULL: - slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; + slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS); break; case USB_SPEED_LOW: - slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; + slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS); break; case USB_SPEED_WIRELESS: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); @@ -916,7 +917,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud port_num = xhci_find_real_port_number(xhci, udev); if (!port_num) return -EINVAL; - slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(port_num); + slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num)); /* Set the port number in the virtual_device to the faked port number */ for (top_dev = udev; top_dev->parent && top_dev->parent->parent; top_dev = top_dev->parent) @@ -927,31 +928,31 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud /* Is this a LS/FS device under an external HS hub? */ if (udev->tt && udev->tt->hub->parent) { - slot_ctx->tt_info = udev->tt->hub->slot_id; - slot_ctx->tt_info |= udev->ttport << 8; + slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id | + (udev->ttport << 8)); if (udev->tt->multi) - slot_ctx->dev_info |= DEV_MTT; + slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); } xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); /* Step 4 - ring already allocated */ /* Step 5 */ - ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); + ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP)); /* * XXX: Not sure about wireless USB devices. 
*/ switch (udev->speed) { case USB_SPEED_SUPER: - ep0_ctx->ep_info2 |= MAX_PACKET(512); + ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512)); break; case USB_SPEED_HIGH: /* USB core guesses at a 64-byte max packet first for FS devices */ case USB_SPEED_FULL: - ep0_ctx->ep_info2 |= MAX_PACKET(64); + ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64)); break; case USB_SPEED_LOW: - ep0_ctx->ep_info2 |= MAX_PACKET(8); + ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8)); break; case USB_SPEED_WIRELESS: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); @@ -962,12 +963,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud BUG(); } /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ - ep0_ctx->ep_info2 |= MAX_BURST(0); - ep0_ctx->ep_info2 |= ERROR_COUNT(3); + ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3)); - ep0_ctx->deq = - dev->eps[0].ring->first_seg->dma; - ep0_ctx->deq |= dev->eps[0].ring->cycle_state; + ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | + dev->eps[0].ring->cycle_state); /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ @@ -1131,10 +1130,10 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, return 0; if (udev->speed == USB_SPEED_SUPER) - return ep->ss_ep_comp.wBytesPerInterval; + return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); - max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); - max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; + max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize)); + max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11; /* A 0 in max burst means 1 transfer per ESIT */ return max_packet * (max_burst + 1); } @@ -1183,10 +1182,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, } virt_dev->eps[ep_index].skip = false; ep_ring = virt_dev->eps[ep_index].new_ring; - ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; + ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state); - ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); - ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep)); + ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep) + | EP_MULT(xhci_get_endpoint_mult(udev, ep))); /* FIXME dig Mult and streams info out of ep companion desc */ @@ -1194,22 +1193,22 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, * error count = 0 means infinite retries. 
*/ if (!usb_endpoint_xfer_isoc(&ep->desc)) - ep_ctx->ep_info2 = ERROR_COUNT(3); + ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3)); else - ep_ctx->ep_info2 = ERROR_COUNT(1); + ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(1)); - ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); + ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); /* Set the max packet size and max burst */ switch (udev->speed) { case USB_SPEED_SUPER: - max_packet = ep->desc.wMaxPacketSize; - ep_ctx->ep_info2 |= MAX_PACKET(max_packet); + max_packet = le16_to_cpu(ep->desc.wMaxPacketSize); + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); /* dig out max burst from ep companion desc */ max_packet = ep->ss_ep_comp.bMaxBurst; if (!max_packet) xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n"); - ep_ctx->ep_info2 |= MAX_BURST(max_packet); + ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); break; case USB_SPEED_HIGH: /* bits 11:12 specify the number of additional transaction @@ -1217,20 +1216,21 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, */ if (usb_endpoint_xfer_isoc(&ep->desc) || usb_endpoint_xfer_int(&ep->desc)) { - max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; - ep_ctx->ep_info2 |= MAX_BURST(max_burst); + max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) + & 0x1800) >> 11; + ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst)); } /* Fall through */ case USB_SPEED_FULL: case USB_SPEED_LOW: - max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); - ep_ctx->ep_info2 |= MAX_PACKET(max_packet); + max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize)); + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); break; default: BUG(); } max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); - ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload); + ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); /* * XXX no idea how to calculate the average TRB buffer length for bulk @@ -1247,7 +1247,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, * use Event Data TRBs, and we don't chain in a link TRB on short * transfers, we're basically dividing by 1. 
*/ - ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload); + ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload)); /* FIXME Debug endpoint context */ return 0; @@ -1347,7 +1347,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) if (!xhci->scratchpad->sp_dma_buffers) goto fail_sp4; - xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; + xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); for (i = 0; i < num_sp; i++) { dma_addr_t dma; void *buf = pci_alloc_consistent(to_pci_dev(dev), @@ -1724,7 +1724,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) } static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, - u32 __iomem *addr, u8 major_revision) + __le32 __iomem *addr, u8 major_revision) { u32 temp, port_offset, port_count; int i; @@ -1789,7 +1789,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, */ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) { - u32 __iomem *addr; + __le32 __iomem *addr; u32 offset; unsigned int num_ports; int i, port_index; @@ -2042,8 +2042,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* set ring base address and size for each segment table entry */ for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { struct xhci_erst_entry *entry = &xhci->erst.entries[val]; - entry->seg_addr = seg->dma; - entry->seg_size = TRBS_PER_SEGMENT; + entry->seg_addr = cpu_to_le64(seg->dma); + entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); entry->rsvd = 0; seg = seg->next; } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 7437386a9a5..396f8d2a2e8 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -100,7 +100,7 @@ static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, return (trb == &seg->trbs[TRBS_PER_SEGMENT]) && (seg->next == xhci->event_ring->first_seg); else - return trb->link.control & LINK_TOGGLE; + return le32_to_cpu(trb->link.control) & LINK_TOGGLE; } /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring @@ -113,13 +113,15 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, if (ring == xhci->event_ring) return trb == &seg->trbs[TRBS_PER_SEGMENT]; else - return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); + return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_LINK); } static int enqueue_is_link_trb(struct xhci_ring *ring) { struct xhci_link_trb *link = &ring->enqueue->link; - return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)); + return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) == + TRB_TYPE(TRB_LINK)); } /* Updates trb to point to the next TRB in the ring, and updates seg if the next @@ -197,7 +199,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, union xhci_trb *next; unsigned long long addr; - chain = ring->enqueue->generic.field[3] & TRB_CHAIN; + chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; next = ++(ring->enqueue); ring->enq_updates++; @@ -223,12 +225,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, * (which may mean the chain bit is cleared). 
*/ if (!xhci_link_trb_quirk(xhci)) { - next->link.control &= ~TRB_CHAIN; - next->link.control |= chain; + next->link.control &= + cpu_to_le32(~TRB_CHAIN); + next->link.control |= + cpu_to_le32(chain); } /* Give this link TRB to the hardware */ wmb(); - next->link.control ^= TRB_CYCLE; + next->link.control ^= cpu_to_le32(TRB_CYCLE); } /* Toggle the cycle bit after the last ring segment. */ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { @@ -319,7 +323,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int ep_index, unsigned int stream_id) { - __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; + __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; unsigned int ep_state = ep->ep_state; @@ -380,7 +384,7 @@ static struct xhci_segment *find_trb_seg( while (cur_seg->trbs > trb || &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; - if (generic_trb->field[3] & LINK_TOGGLE) + if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE) *cycle_state ^= 0x1; cur_seg = cur_seg->next; if (cur_seg == start_seg) @@ -447,6 +451,10 @@ static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, * any link TRBs with the toggle cycle bit set. * - Finally we move the dequeue state one TRB further, toggling the cycle bit * if we've moved it past a link TRB with the toggle cycle bit set. + * + * Some of the uses of xhci_generic_trb are grotty, but if they're done + * with correct __le32 accesses they should work fine. Only users of this are + * in here. */ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, @@ -480,7 +488,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, /* Dig out the cycle state saved by the xHC during the stop ep cmd */ xhci_dbg(xhci, "Finding endpoint context\n"); ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); - state->new_cycle_state = 0x1 & ep_ctx->deq; + state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); state->new_deq_ptr = cur_td->last_trb; xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); @@ -493,8 +501,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, } trb = &state->new_deq_ptr->generic; - if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && - (trb->field[3] & LINK_TOGGLE)) + if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) == + TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE)) state->new_cycle_state ^= 0x1; next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); @@ -529,12 +537,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb; true; next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) == - TRB_TYPE(TRB_LINK)) { + if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_LINK)) { /* Unchain any chained Link TRBs, but * leave the pointers intact. */ - cur_trb->generic.field[3] &= ~TRB_CHAIN; |