Diffstat (limited to 'drivers/usb/host/uhci-hcd.h')
-rw-r--r--   drivers/usb/host/uhci-hcd.h | 423
1 file changed, 300 insertions, 123 deletions
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index d5c8f4d9282..6f986d82472 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -67,25 +67,54 @@
 #define USBPORTSC_RES3		0x4000	/* reserved, write zeroes */
 #define USBPORTSC_RES4		0x8000	/* reserved, write zeroes */
-/* Legacy support register */
+/* PCI legacy support register */
 #define USBLEGSUP		0xc0
 #define USBLEGSUP_DEFAULT	0x2000	/* only PIRQ enable set */
 #define USBLEGSUP_RWC		0x8f00	/* the R/WC bits */
 #define USBLEGSUP_RO		0x5040	/* R/O and reserved bits */
-#define UHCI_PTR_BITS		__constant_cpu_to_le32(0x000F)
-#define UHCI_PTR_TERM		__constant_cpu_to_le32(0x0001)
-#define UHCI_PTR_QH		__constant_cpu_to_le32(0x0002)
-#define UHCI_PTR_DEPTH		__constant_cpu_to_le32(0x0004)
-#define UHCI_PTR_BREADTH	__constant_cpu_to_le32(0x0000)
+/* PCI Intel-specific resume-enable register */
+#define USBRES_INTEL		0xc4
+#define USBPORT1EN		0x01
+#define USBPORT2EN		0x02
+
+#define UHCI_PTR_BITS(uhci)	cpu_to_hc32((uhci), 0x000F)
+#define UHCI_PTR_TERM(uhci)	cpu_to_hc32((uhci), 0x0001)
+#define UHCI_PTR_QH(uhci)	cpu_to_hc32((uhci), 0x0002)
+#define UHCI_PTR_DEPTH(uhci)	cpu_to_hc32((uhci), 0x0004)
+#define UHCI_PTR_BREADTH(uhci)	cpu_to_hc32((uhci), 0x0000)
 #define UHCI_NUMFRAMES		1024	/* in the frame list [array] */
 #define UHCI_MAX_SOF_NUMBER	2047	/* in an SOF packet */
 #define CAN_SCHEDULE_FRAMES	1000	/* how far in the future frames
 					 * can be scheduled */
+#define MAX_PHASE		32	/* Periodic scheduling length */
+
+/* When no queues need Full-Speed Bandwidth Reclamation,
+ * delay this long before turning FSBR off */
+#define FSBR_OFF_DELAY		msecs_to_jiffies(10)
+
+/* If a queue hasn't advanced after this much time, assume it is stuck */
+#define QH_WAIT_TIMEOUT		msecs_to_jiffies(200)
 /*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given UHCI_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_DESC
+typedef __u32 __bitwise __hc32;
+typedef __u16 __bitwise __hc16;
+#else
+#define __hc32	__le32
+#define __hc16	__le16
+#endif
+
+/*
  *	Queue Headers
  */
@@ -117,8 +146,8 @@ struct uhci_qh {
 	/* Hardware fields */
-	__le32 link;			/* Next QH in the schedule */
-	__le32 element;			/* Queue element (TD) pointer */
+	__hc32 link;			/* Next QH in the schedule */
+	__hc32 element;			/* Queue element (TD) pointer */
 	/* Software fields */
 	dma_addr_t dma_handle;
@@ -127,27 +156,38 @@ struct uhci_qh {
 	struct usb_host_endpoint *hep;	/* Endpoint information */
 	struct usb_device *udev;
 	struct list_head queue;		/* Queue of urbps for this QH */
-	struct uhci_qh *skel;		/* Skeleton for this QH */
 	struct uhci_td *dummy_td;	/* Dummy TD to end the queue */
+	struct uhci_td *post_td;	/* Last TD completed */
+	struct usb_iso_packet_descriptor *iso_packet_desc;
+					/* Next urb->iso_frame_desc entry */
+	unsigned long advance_jiffies;	/* Time of last queue advance */
 	unsigned int unlink_frame;	/* When the QH was unlinked */
+	unsigned int period;		/* For Interrupt and Isochronous QHs */
+	short phase;			/* Between 0 and period-1 */
+	short load;			/* Periodic time requirement, in us */
+	unsigned int iso_frame;		/* Frame # for iso_packet_desc */
+	int state;			/* QH_STATE_xxx; see above */
+	int type;			/* Queue type (control, bulk, etc) */
+	int skel;			/* Skeleton queue number */
 	unsigned int initial_toggle:1;	/* Endpoint's current toggle value */
 	unsigned int needs_fixup:1;	/* Must fix the TD toggle values */
-	unsigned int is_stopped:1;	/* Queue was stopped by an error */
+	unsigned int is_stopped:1;	/* Queue was stopped by error/unlink */
+	unsigned int wait_expired:1;	/* QH_WAIT_TIMEOUT has expired */
+	unsigned int bandwidth_reserved:1;	/* Periodic bandwidth has
+						 * been allocated */
 } __attribute__((aligned(16)));
 /*
  * We need a special accessor for the element pointer because it is
  * subject to asynchronous updates by the controller.
  */
-static inline __le32 qh_element(struct uhci_qh *qh) {
-	__le32 element = qh->element;
+#define qh_element(qh)		ACCESS_ONCE((qh)->element)
-	barrier();
-	return element;
-}
+#define LINK_TO_QH(uhci, qh)	(UHCI_PTR_QH((uhci)) | \
+				cpu_to_hc32((uhci), (qh)->dma_handle))
 /*
@@ -172,10 +212,6 @@ static inline __le32 qh_element(struct uhci_qh *qh) {
 #define TD_CTRL_BITSTUFF	(1 << 17)	/* Bit Stuff Error */
 #define TD_CTRL_ACTLEN_MASK	0x7FF	/* actual length, encoded as n - 1 */
-#define TD_CTRL_ANY_ERROR	(TD_CTRL_STALLED | TD_CTRL_DBUFERR | \
-				 TD_CTRL_BABBLE | TD_CTRL_CRCTIME | \
-				 TD_CTRL_BITSTUFF)
-
 #define uhci_maxerr(err)		((err) << TD_CTRL_C_ERR_SHIFT)
 #define uhci_status_bits(ctrl_sts)	((ctrl_sts) & 0xF60000)
 #define uhci_actual_length(ctrl_sts)	(((ctrl_sts) + 1) & \
@@ -184,7 +220,7 @@ static inline __le32 qh_element(struct uhci_qh *qh) {
 /*
  * for TD <info>: (a.k.a. Token)
  */
-#define td_token(td)		le32_to_cpu((td)->token)
+#define td_token(uhci, td)	hc32_to_cpu((uhci), (td)->token)
 #define TD_TOKEN_DEVADDR_SHIFT	8
 #define TD_TOKEN_TOGGLE_SHIFT	19
 #define TD_TOKEN_TOGGLE		(1 << 19)
@@ -217,16 +253,15 @@ static inline __le32 qh_element(struct uhci_qh *qh) {
  */
 struct uhci_td {
 	/* Hardware fields */
-	__le32 link;
-	__le32 status;
-	__le32 token;
-	__le32 buffer;
+	__hc32 link;
+	__hc32 status;
+	__hc32 token;
+	__hc32 buffer;
 	/* Software fields */
 	dma_addr_t dma_handle;
 	struct list_head list;
-	struct list_head remove_list;
 	int frame;			/* for iso: what frame? */
 	struct list_head fl_list;
@@ -236,12 +271,10 @@ struct uhci_td {
  * We need a special accessor for the control/status word because it is
  * subject to asynchronous updates by the controller.
  */
-static inline u32 td_status(struct uhci_td *td) {
-	__le32 status = td->status;
+#define td_status(uhci, td)	hc32_to_cpu((uhci), \
+					ACCESS_ONCE((td)->status))
-	barrier();
-	return le32_to_cpu(status);
-}
+#define LINK_TO_TD(uhci, td)	(cpu_to_hc32((uhci), (td)->dma_handle))
 /*
@@ -251,12 +284,13 @@ static inline u32 td_status(struct uhci_td *td) {
 /*
 * The UHCI driver uses QHs with Interrupt, Control and Bulk URBs for
 * automatic queuing.  To make it easy to insert entries into the schedule,
- * we have a skeleton of QHs for each predefined Interrupt latency,
- * low-speed control, full-speed control, bulk, and terminating QH
- * (see explanation for the terminating QH below).
+ * we have a skeleton of QHs for each predefined Interrupt latency.
+ * Asynchronous QHs (low-speed control, full-speed control, and bulk)
+ * go onto the period-1 interrupt list, since they all get accessed on
+ * every frame.
 *
- * When we want to add a new QH, we add it to the end of the list for the
- * skeleton QH.  For instance, the schedule list can look like this:
+ * When we want to add a new QH, we add it to the list starting from the
+ * appropriate skeleton QH.  For instance, the schedule can look like this:
 *
 *   skel int128 QH
 *   dev 1 interrupt QH
 *
@@ -264,80 +298,47 @@ static inline u32 td_status(struct uhci_td *td) {
 *   skel int64 QH
 *   skel int32 QH
 *   ...
- *   skel int1 QH
- *   skel low-speed control QH
- *   dev 5 control QH
- *   skel full-speed control QH
- *   skel bulk QH
+ *   skel int1 + async QH
+ *   dev 5 low-speed control QH
 *   dev 1 bulk QH
 *   dev 2 bulk QH
- *   skel terminating QH
 *
- * The terminating QH is used for 2 reasons:
- *   - To place a terminating TD which is used to workaround a PIIX bug
- *     (see Intel errata for explanation), and
- *   - To loop back to the full-speed control queue for full-speed bandwidth
- *     reclamation.
+ * There is a special terminating QH used to keep full-speed bandwidth
+ * reclamation active when no full-speed control or bulk QHs are linked
+ * into the schedule.  It has an inactive TD (to work around a PIIX bug,
+ * see the Intel errata) and it points back to itself.
 *
- * There's a special skeleton QH for Isochronous QHs.  It never appears
- * on the schedule, and Isochronous TDs go on the schedule before the
+ * There's a special skeleton QH for Isochronous QHs which never appears
+ * on the schedule.  Isochronous TDs go on the schedule before the
 * the skeleton QHs.  The hardware accesses them directly rather than
 * through their QH, which is used only for bookkeeping purposes.
 * While the UHCI spec doesn't forbid the use of QHs for Isochronous,
 * it doesn't use them either.  And the spec says that queues never
 * advance on an error completion status, which makes them totally
 * unsuitable for Isochronous transfers.
- */
-
-#define UHCI_NUM_SKELQH		14
-#define skel_unlink_qh		skelqh[0]
-#define skel_iso_qh		skelqh[1]
-#define skel_int128_qh		skelqh[2]
-#define skel_int64_qh		skelqh[3]
-#define skel_int32_qh		skelqh[4]
-#define skel_int16_qh		skelqh[5]
-#define skel_int8_qh		skelqh[6]
-#define skel_int4_qh		skelqh[7]
-#define skel_int2_qh		skelqh[8]
-#define skel_int1_qh		skelqh[9]
-#define skel_ls_control_qh	skelqh[10]
-#define skel_fs_control_qh	skelqh[11]
-#define skel_bulk_qh		skelqh[12]
-#define skel_term_qh		skelqh[13]
-
-/*
- * Search tree for determining where <interval> fits in the skelqh[]
- * skeleton.
- *
- * An interrupt request should be placed into the slowest skelqh[]
- * which meets the interval/period/frequency requirement.
- * An interrupt request is allowed to be faster than <interval> but not slower.
 *
- * For a given <interval>, this function returns the appropriate/matching
- * skelqh[] index value.
+ * There's also a special skeleton QH used for QHs which are in the process
+ * of unlinking and so may still be in use by the hardware.  It too never
+ * appears on the schedule.
 */
-static inline int __interval_to_skel(int interval)
-{
-	if (interval < 16) {
-		if (interval < 4) {
-			if (interval < 2)
-				return 9;	/* int1 for 0-1 ms */
-			return 8;		/* int2 for 2-3 ms */
-		}
-		if (interval < 8)
-			return 7;		/* int4 for 4-7 ms */
-		return 6;			/* int8 for 8-15 ms */
-	}
-	if (interval < 64) {
-		if (interval < 32)
-			return 5;		/* int16 for 16-31 ms */
-		return 4;			/* int32 for 32-63 ms */
-	}
-	if (interval < 128)
-		return 3;			/* int64 for 64-127 ms */
-	return 2;				/* int128 for 128-255 ms (Max.) */
-}
+#define UHCI_NUM_SKELQH		11
+#define SKEL_UNLINK		0
+#define skel_unlink_qh		skelqh[SKEL_UNLINK]
+#define SKEL_ISO		1
+#define skel_iso_qh		skelqh[SKEL_ISO]
+	/* int128, int64, ..., int1 = 2, 3, ..., 9 */
+#define SKEL_INDEX(exponent)	(9 - exponent)
+#define SKEL_ASYNC		9
+#define skel_async_qh		skelqh[SKEL_ASYNC]
+#define SKEL_TERM		10
+#define skel_term_qh		skelqh[SKEL_TERM]
+
+/* The following entries refer to sublists of skel_async_qh */
+#define SKEL_LS_CONTROL		20
+#define SKEL_FS_CONTROL		21
+#define SKEL_FSBR		SKEL_FS_CONTROL
+#define SKEL_BULK		22
 /*
 *	The UHCI controller and root hub
@@ -383,6 +384,9 @@ struct uhci_hcd {
 	/* Grabbed from PCI */
 	unsigned long io_addr;
+	/* Used when registers are memory mapped */
+	void __iomem *regs;
+
 	struct dma_pool *qh_pool;
 	struct dma_pool *td_pool;
@@ -393,41 +397,61 @@ struct uhci_hcd {
 	spinlock_t lock;
 	dma_addr_t frame_dma_handle;	/* Hardware frame list */
-	__le32 *frame;
+	__hc32 *frame;
 	void **frame_cpu;		/* CPU's frame list */
-	int fsbr;			/* Full-speed bandwidth reclamation */
-	unsigned long fsbrtimeout;	/* FSBR delay */
-
 	enum uhci_rh_state rh_state;
 	unsigned long auto_stop_time;	/* When to AUTO_STOP */
 	unsigned int frame_number;	/* As of last check */
 	unsigned int is_stopped;
 #define UHCI_IS_STOPPED		9999	/* Larger than a frame # */
+	unsigned int last_iso_frame;	/* Frame of last scan */
+	unsigned int cur_iso_frame;	/* Frame for current scan */
 	unsigned int scan_in_progress:1;	/* Schedule scan is running */
 	unsigned int need_rescan:1;	/* Redo the schedule scan */
-	unsigned int hc_inaccessible:1;	/* HC is suspended or dead */
-	unsigned int working_RD:1;	/* Suspended root hub doesn't
					   need to be polled */
+	unsigned int dead:1;		/* Controller has died */
+	unsigned int RD_enable:1;	/* Suspended root hub with
					   Resume-Detect interrupts
					   enabled */
 	unsigned int is_initialized:1;	/* Data structure is usable */
+	unsigned int fsbr_is_on:1;	/* FSBR is turned on */
+	unsigned int fsbr_is_wanted:1;	/* Does any URB want FSBR? */
+	unsigned int fsbr_expiring:1;	/* FSBR is timing out */
+
+	struct timer_list fsbr_timer;	/* For turning off FBSR */
+
+	/* Silicon quirks */
+	unsigned int oc_low:1;		/* OverCurrent bit active low */
+	unsigned int wait_for_hp:1;	/* Wait for HP port reset */
+	unsigned int big_endian_mmio:1;	/* Big endian registers */
+	unsigned int big_endian_desc:1;	/* Big endian descriptors */
 	/* Support for port suspend/resume/reset */
 	unsigned long port_c_suspend;	/* Bit-arrays of ports */
 	unsigned long resuming_ports;
 	unsigned long ports_timeout;	/* Time to stop signalling */
-	/* List of TDs that are done, but waiting to be freed (race) */
-	struct list_head td_remove_list;
-	unsigned int td_remove_age;	/* Age in frames */
-
 	struct list_head idle_qh_list;	/* Where the idle QHs live */
 	int rh_numports;		/* Number of root-hub ports */
 	wait_queue_head_t waitqh;	/* endpoint_disable waiters */
 	int num_waiting;		/* Number of waiters */
+
+	int total_load;			/* Sum of array values */
+	short load[MAX_PHASE];		/* Periodic allocations */
+
+	/* Reset host controller */
+	void	(*reset_hc) (struct uhci_hcd *uhci);
+	int	(*check_and_reset_hc) (struct uhci_hcd *uhci);
+	/* configure_hc should perform arch specific settings, if needed */
+	void	(*configure_hc) (struct uhci_hcd *uhci);
+	/* Check for broken resume detect interrupts */
+	int	(*resume_detect_interrupts_are_broken) (struct uhci_hcd *uhci);
+	/* Check for broken global suspend */
+	int	(*global_suspend_mode_is_broken) (struct uhci_hcd *uhci);
 };
 /* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
@@ -442,6 +466,9 @@ static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci)
 #define uhci_dev(u)	(uhci_to_hcd(u)->self.controller)
+/* Utility macro for comparing frame numbers */
+#define uhci_frame_before_eq(f1, f2)	(0 <= (int) ((f2) - (f1)))
+
 /*
  * Private per-URB data
@@ -454,30 +481,180 @@ struct urb_priv {
 	struct uhci_qh *qh;		/* QH for this URB */
 	struct list_head td_list;
-	unsigned fsbr : 1;		/* URB turned on FSBR */
-	unsigned short_transfer : 1;	/* URB got a short transfer, no
-					 * need to rescan */
+	unsigned fsbr:1;		/* URB wants FSBR */
 };
+/* Some special IDs */
+
+#define PCI_VENDOR_ID_GENESYS		0x17a0
+#define PCI_DEVICE_ID_GL880S_UHCI	0x8083
+
 /*
- * Locking in uhci.c
- *
- * Almost everything relating to the hardware schedule and processing
- * of URBs is protected by uhci->lock.  urb->status is protected by
- * urb->lock; that's the one exception.
- *
- * To prevent deadlocks, never lock uhci->lock while holding urb->lock.
- * The safe order of locking is:
+ * Functions used to access controller registers.  The UCHI spec says that host
+ * controller I/O registers are mapped into PCI I/O space.  For non-PCI hosts
+ * we use memory mapped registers.
+ */
+
+#ifndef CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC
+/* Support PCI only */
+static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
+{
+	return inl(uhci->io_addr + reg);
+}
+
+static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg)
+{
+	outl(val, uhci->io_addr + reg);
+}
+
+static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg)
+{
+	return inw(uhci->io_addr + reg);
+}
+
+static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg)
+{
+	outw(val, uhci->io_addr + reg);
+}
+
+static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg)
+{
+	return inb(uhci->io_addr + reg);
+}
+
+static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
+{
+	outb(val, uhci->io_addr + reg);
+}
+
+#else
+/* Support non-PCI host controllers */
+#ifdef CONFIG_PCI
+/* Support PCI and non-PCI host controllers */
+#define uhci_has_pci_registers(u)	((u)->io_addr != 0)
+#else
+/* Support non-PCI host controllers only */
+#define uhci_has_pci_registers(u)	0
+#endif
+
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+/* Support (non-PCI) big endian host controllers */
+#define uhci_big_endian_mmio(u)		((u)->big_endian_mmio)
+#else
+#define uhci_big_endian_mmio(u)		0
+#endif
+
+static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		return inl(uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		return readl_be(uhci->regs + reg);
+#endif
+	else
+		return readl(uhci->regs + reg);
+}
+
+static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		outl(val, uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		writel_be(val, uhci->regs + reg);
+#endif
+	else
+		writel(val, uhci->regs + reg);
+}
+
+static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		return inw(uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		return readw_be(uhci->regs + reg);
+#endif
+	else
+		return readw(uhci->regs + reg);
+}
+
+static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		outw(val, uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		writew_be(val, uhci->regs + reg);
+#endif
+	else
+		writew(val, uhci->regs + reg);
+}
+
+static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		return inb(uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		return readb_be(uhci->regs + reg);
+#endif
+	else
+		return readb(uhci->regs + reg);
}
+
+static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		outb(val, uhci->io_addr + reg);
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		writeb_be(val, uhci->regs + reg);
+#endif
+	else
+		writeb(val, uhci->regs + reg);
+}
+#endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */
+
+/*
+ * The GRLIB GRUSBHC controller can use big endian format for its descriptors.
 *
- * #1 uhci->lock
- * #2 urb->lock
+ * UHCI controllers accessed through PCI work normally (little-endian
+ * everywhere), so we don't bother supporting a BE-only mode.
 */
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_DESC
+#define uhci_big_endian_desc(u)		((u)->big_endian_desc)
+/* cpu to uhci */
+static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, const u32 x)
+{
+	return uhci_big_endian_desc(uhci)
+		? (__force __hc32)cpu_to_be32(x)
+		: (__force __hc32)cpu_to_le32(x);
+}
-/* Some special IDs */
+/* uhci to cpu */
+static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, const __hc32 x)
+{
+	return uhci_big_endian_desc(uhci)
+		? be32_to_cpu((__force __be32)x)
+		: le32_to_cpu((__force __le32)x);
+}
-#define PCI_VENDOR_ID_GENESYS		0x17a0
-#define PCI_DEVICE_ID_GL880S_UHCI	0x8083
+#else
+/* cpu to uhci */
+static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, const u32 x)
+{
+	return cpu_to_le32(x);
+}
+
+/* uhci to cpu */
+static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, const __hc32 x)
+{
+	return le32_to_cpu(x);
+}
+#endif
 #endif
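
A few notes on the less obvious parts of this patch follow, each with a small standalone sketch. None of this code is part of the patch; all struct and helper names in the sketches are invented for illustration.

Descriptor byte order: every hardware-visible 32-bit word now goes through cpu_to_hc32()/hc32_to_cpu(), which pick a little- or big-endian layout per controller (uhci->big_endian_desc), so a link pointer built by LINK_TO_QH() lands in memory in whatever order that controller expects. The sketch below mimics that selection in plain C; fake_uhci, swab32() and the 0x12345670 address are made up, while the 0x0002 QH flag is UHCI_PTR_QH from the header above.

/* Standalone sketch (not kernel code) of the cpu_to_hc32() selection above.
 * "fake_uhci" stands in for struct uhci_hcd; only big_endian_desc matters. */
#include <stdint.h>
#include <stdio.h>

struct fake_uhci {
	int big_endian_desc;		/* set for GRUSBHC-style controllers */
};

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

static int cpu_is_little_endian(void)
{
	uint16_t probe = 1;

	return *(uint8_t *)&probe == 1;
}

/* Same decision cpu_to_hc32() makes: emit LE or BE per controller */
static uint32_t cpu_to_hc32_sketch(const struct fake_uhci *u, uint32_t x)
{
	int want_big_endian = u->big_endian_desc;

	if (cpu_is_little_endian() == !want_big_endian)
		return x;		/* CPU order already matches */
	return swab32(x);
}

int main(void)
{
	struct fake_uhci le_hc = { .big_endian_desc = 0 };
	struct fake_uhci be_hc = { .big_endian_desc = 1 };
	uint32_t dma_handle = 0x12345670u;	/* made-up QH bus address */
	uint32_t link = dma_handle | 0x0002;	/* UHCI_PTR_QH flag from above */

	/* Roughly what LINK_TO_QH() produces for each kind of controller */
	printf("LE descriptors: %08x\n", (unsigned)cpu_to_hc32_sketch(&le_hc, link));
	printf("BE descriptors: %08x\n", (unsigned)cpu_to_hc32_sketch(&be_hc, link));
	return 0;
}

On a little-endian machine this prints 12345672 for the LE controller and 72563412 for the BE one, which is exactly why every descriptor word is funnelled through one helper that takes the uhci pointer.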
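
Accessors: qh_element() and td_status() become ACCESS_ONCE() wrappers instead of inline functions with barrier(). The idea is unchanged: the controller updates these words by DMA, so each read must really reach memory. Below is a rough user-space equivalent of that accessor pattern, assuming GCC/Clang (typeof, volatile cast); the "active" bit is bit 23 (TD_CTRL_ACTIVE) in this header, but fake_td and the helper names are invented.

/* Sketch of the ACCESS_ONCE()-style accessor; the volatile cast is what
 * prevents the compiler from caching or tearing the read. */
#include <stdint.h>

#define READ_ONCE_SKETCH(x)	(*(volatile typeof(x) *)&(x))

struct fake_td {
	uint32_t status;		/* written by the controller */
};

/* Spin until the TD's "active" bit (bit 23, TD_CTRL_ACTIVE) clears */
static uint32_t wait_td_inactive(struct fake_td *td)
{
	uint32_t status;

	do {
		status = READ_ONCE_SKETCH(td->status);
	} while (status & (1u << 23));
	return status;
}

int main(void)
{
	struct fake_td td = { .status = 0 };	/* active bit already clear */

	return (int)wait_td_inactive(&td);
}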
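
Skeleton numbering: the old __interval_to_skel() decision tree is gone; interrupt QHs are now filed by SKEL_INDEX(exponent), where the exponent is the power of two not exceeding the requested interval (int1 through int128 map to indexes 9 down to 2). The sketch shows one plausible way to compute that exponent, matching the rounding the old tree performed; interval_to_skel_sketch() is a made-up name, not the driver's actual enqueue code.

/* Sketch only: map an interrupt interval (in frames) onto the skeleton
 * numbering above, rounding down to a power of two and clamping to 1..128. */
#include <stdio.h>

#define SKEL_INDEX(exponent)	(9 - exponent)

static int interval_to_skel_sketch(int interval)
{
	int exponent = -1;

	/* Largest power of two <= interval, capped at 2^7 = 128 frames */
	do {
		interval >>= 1;
		exponent++;
	} while (interval > 0 && exponent < 7);

	return SKEL_INDEX(exponent);	/* int1 -> 9, ..., int128 -> 2 */
}

int main(void)
{
	int intervals[] = { 1, 2, 5, 16, 255 };

	for (int i = 0; i < 5; i++)
		printf("interval %3d -> skelqh[%d]\n",
		       intervals[i], interval_to_skel_sketch(intervals[i]));
	return 0;
}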
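
Frame comparison: uhci_frame_before_eq() compares frame numbers through a signed difference because the frame counter wraps; a plain "<=" gives the wrong answer for values that straddle the wrap point. A minimal demonstration (the two sample frame numbers are arbitrary):

/* Sketch: the cast-to-int trick stays correct as long as the two frames
 * are less than half the counter range apart. */
#include <stdio.h>

#define uhci_frame_before_eq(f1, f2)	(0 <= (int) ((f2) - (f1)))

int main(void)
{
	unsigned int before_wrap = 0xfffffff0u;
	unsigned int after_wrap  = 0x00000010u;

	printf("naive <=     : %d\n", before_wrap <= after_wrap);	/* 0: wrong */
	printf("frame compare: %d\n",
	       uhci_frame_before_eq(before_wrap, after_wrap));		/* 1: right */
	return 0;
}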
