Diffstat (limited to 'drivers/usb/wusbcore')
-rw-r--r--  drivers/usb/wusbcore/Kconfig        |    8
-rw-r--r--  drivers/usb/wusbcore/cbaf.c         |   45
-rw-r--r--  drivers/usb/wusbcore/crypto.c       |    6
-rw-r--r--  drivers/usb/wusbcore/devconnect.c   |  102
-rw-r--r--  drivers/usb/wusbcore/mmc.c          |   43
-rw-r--r--  drivers/usb/wusbcore/pal.c          |    6
-rw-r--r--  drivers/usb/wusbcore/reservation.c  |    6
-rw-r--r--  drivers/usb/wusbcore/rh.c           |   57
-rw-r--r--  drivers/usb/wusbcore/security.c     |  146
-rw-r--r--  drivers/usb/wusbcore/wa-hc.c        |   23
-rw-r--r--  drivers/usb/wusbcore/wa-hc.h        |   98
-rw-r--r--  drivers/usb/wusbcore/wa-nep.c       |   13
-rw-r--r--  drivers/usb/wusbcore/wa-rpipe.c     |  114
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c      | 2078
-rw-r--r--  drivers/usb/wusbcore/wusbhc.c       |   96
-rw-r--r--  drivers/usb/wusbcore/wusbhc.h       |   24
16 files changed, 2177 insertions(+), 688 deletions(-)
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
index eb09a0a14a8..0e17b966e1b 100644
--- a/drivers/usb/wusbcore/Kconfig
+++ b/drivers/usb/wusbcore/Kconfig
@@ -2,10 +2,9 @@
# Wireless USB Core configuration
#
config USB_WUSB
- tristate "Enable Wireless USB extensions (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- depends on USB
- select UWB
+ tristate "Enable Wireless USB extensions"
+ depends on PCI
+ depends on UWB
select CRYPTO
select CRYPTO_BLKCIPHER
select CRYPTO_CBC
@@ -19,7 +18,6 @@ config USB_WUSB
config USB_WUSB_CBAF
tristate "Support WUSB Cable Based Association (CBA)"
- depends on USB
help
Some WUSB devices support Cable Based Association. It's used to
enable the secure communication between the host and the
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index c0c5665e60a..da1b872918b 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -144,7 +144,7 @@ static int cbaf_check(struct cbaf *cbaf)
CBAF_REQ_GET_ASSOCIATION_INFORMATION,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+ cbaf->buffer, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Cannot get available association types: %d\n",
result);
@@ -184,7 +184,7 @@ static int cbaf_check(struct cbaf *cbaf)
assoc_request = itr;
if (top - itr < sizeof(*assoc_request)) {
- dev_err(dev, "Not enough data to decode associaton "
+ dev_err(dev, "Not enough data to decode association "
"request (%zu vs %zu bytes needed)\n",
top - itr, sizeof(*assoc_request));
break;
@@ -208,9 +208,9 @@ static int cbaf_check(struct cbaf *cbaf)
ar_name = "ASSOCIATE";
ar_assoc = 1;
break;
- };
+ }
break;
- };
+ }
dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "
"(%zu bytes): %s\n",
@@ -235,7 +235,7 @@ static int cbaf_check(struct cbaf *cbaf)
static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
- .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
.CHID_hdr = WUSB_AR_CHID,
@@ -260,12 +260,13 @@ static int cbaf_send_host_info(struct cbaf *cbaf)
hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
hi_size = sizeof(*hi) + name_len;
- return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
+ return usb_control_msg(cbaf->usb_dev,
+ usb_sndctrlpipe(cbaf->usb_dev, 0),
CBAF_REQ_SET_ASSOCIATION_RESPONSE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0101,
cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- hi, hi_size, 1000 /* FIXME: arbitrary */);
+ hi, hi_size, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -288,9 +289,10 @@ static int cbaf_cdid_get(struct cbaf *cbaf)
CBAF_REQ_GET_ASSOCIATION_REQUEST,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+ di, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
if (result < 0) {
- dev_err(dev, "Cannot request device information: %d\n", result);
+ dev_err(dev, "Cannot request device information: %d\n",
+ result);
return result;
}
@@ -298,7 +300,7 @@ static int cbaf_cdid_get(struct cbaf *cbaf)
if (result < needed) {
dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
"%zu bytes needed)\n", (size_t)result, needed);
- return result;
+ return -ENOENT;
}
strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
@@ -350,7 +352,7 @@ static ssize_t cbaf_wusb_chid_store(struct device *dev,
return result;
result = cbaf_cdid_get(cbaf);
if (result < 0)
- return -result;
+ return result;
return size;
}
static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
@@ -491,11 +493,11 @@ static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
- .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
.Length_hdr = WUSB_AR_Length,
- .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
+ .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
.ConnectionContext_hdr = WUSB_AR_ConnectionContext,
.BandGroups_hdr = WUSB_AR_BandGroups,
};
@@ -536,7 +538,7 @@ static int cbaf_cc_upload(struct cbaf *cbaf)
CBAF_REQ_SET_ASSOCIATION_RESPONSE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */);
+ ccd, sizeof(*ccd), USB_CTRL_SET_TIMEOUT);
return result;
}
@@ -623,6 +625,8 @@ static int cbaf_probe(struct usb_interface *iface,
error_create_group:
error_check:
+ usb_put_intf(iface);
+ usb_put_dev(cbaf->usb_dev);
kfree(cbaf->buffer);
error_kmalloc_buffer:
kfree(cbaf);
@@ -637,6 +641,7 @@ static void cbaf_disconnect(struct usb_interface *iface)
sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);
usb_set_intfdata(iface, NULL);
usb_put_intf(iface);
+ usb_put_dev(cbaf->usb_dev);
kfree(cbaf->buffer);
/* paranoia: clean up crypto keys */
kzfree(cbaf);
@@ -655,17 +660,7 @@ static struct usb_driver cbaf_driver = {
.disconnect = cbaf_disconnect,
};
-static int __init cbaf_driver_init(void)
-{
- return usb_register(&cbaf_driver);
-}
-module_init(cbaf_driver_init);
-
-static void __exit cbaf_driver_exit(void)
-{
- usb_deregister(&cbaf_driver);
-}
-module_exit(cbaf_driver_exit);
+module_usb_driver(cbaf_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless USB Cable Based Association");
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 827c87f10cc..9a95b2dc6d1 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -87,7 +87,7 @@ struct aes_ccm_block {
* B1 contains l(a), the MAC header, the encryption offset and padding.
*
* If EO is nonzero, additional blocks are built from payload bytes
- * until EO is exahusted (FIXME: padding to 16 bytes, I guess). The
+ * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
* padding is not xmitted.
*/
@@ -180,7 +180,7 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
* using the 14 bytes of @a to fill up
* b1.{mac_header,e0,security_reserved,padding}.
*
- * NOTE: The definiton of l(a) in WUSB1.0[6.5] vs the definition of
+ * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
* l(m) is orthogonal, they bear no relationship, so it is not
* in conflict with the parameter's relation that
* WUSB1.0[6.4.2]) defines.
@@ -272,7 +272,7 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
/* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
* The procedure is to AES crypt the A0 block and XOR the MIC
- * Tag agains it; we only do the first 8 bytes and place it
+ * Tag against it; we only do the first 8 bytes and place it
* directly in the destination buffer.
*
* POS Crypto API: size is assumed to be AES's block size.
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 7ec24e46b34..0677139c606 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -90,24 +90,19 @@
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/export.h>
#include "wusbhc.h"
static void wusbhc_devconnect_acked_work(struct work_struct *work);
static void wusb_dev_free(struct wusb_dev *wusb_dev)
{
- if (wusb_dev) {
- kfree(wusb_dev->set_gtk_req);
- usb_free_urb(wusb_dev->set_gtk_urb);
- kfree(wusb_dev);
- }
+ kfree(wusb_dev);
}
static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
{
struct wusb_dev *wusb_dev;
- struct urb *urb;
- struct usb_ctrlrequest *req;
wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
if (wusb_dev == NULL)
@@ -117,22 +112,6 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (urb == NULL)
- goto err;
- wusb_dev->set_gtk_urb = urb;
-
- req = kmalloc(sizeof(*req), GFP_KERNEL);
- if (req == NULL)
- goto err;
- wusb_dev->set_gtk_req = req;
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
- req->bRequest = USB_REQ_SET_DESCRIPTOR;
- req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index);
- req->wIndex = 0;
- req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
-
return wusb_dev;
err:
wusb_dev_free(wusb_dev);
@@ -286,9 +265,9 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work)
* Addresses: because WUSB hosts have no downstream hubs, we can do a
* 1:1 mapping between 'port number' and device
* address. This simplifies many things, as during this
- * initial connect phase the USB stack has no knoledge of
+ * initial connect phase the USB stack has no knowledge of
* the device and hasn't assigned an address yet--we know
- * USB's choose_address() will use the same euristics we
+ * USB's choose_address() will use the same heuristics we
* use here, so we can assume which address will be assigned.
*
* USB stack always assigns address 1 to the root hub, so
@@ -305,7 +284,7 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
struct device *dev = wusbhc->dev;
struct wusb_dev *wusb_dev;
struct wusb_port *port;
- unsigned idx, devnum;
+ unsigned idx;
mutex_lock(&wusbhc->mutex);
@@ -333,8 +312,6 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
goto error_unlock;
}
- devnum = idx + 2;
-
/* Make sure we are using no crypto on that "virtual port" */
wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
@@ -410,9 +387,6 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
/*
* Refresh the list of keep alives to emit in the MMC
*
- * Some devices don't respond to keep alives unless they've been
- * authenticated, so skip unauthenticated devices.
- *
* We only publish the first four devices that have a coming timeout
* condition. Then when we are done processing those, we go for the
* next ones. We ignore the ones that have timed out already (they'll
@@ -447,15 +421,15 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
if (wusb_dev == NULL)
continue;
- if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated)
+ if (wusb_dev->usb_dev == NULL)
continue;
if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
dev_err(dev, "KEEPALIVE: device %u timed out\n",
wusb_dev->addr);
__wusbhc_dev_disconnect(wusbhc, wusb_port);
- } else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) {
- /* Approaching timeout cut out, need to refresh */
+ } else if (time_after(jiffies, wusb_dev->entry_ts + tt/3)) {
+ /* Approaching timeout cut off, need to refresh */
ie->bDeviceAddress[keep_alives++] = wusb_dev->addr;
}
}
@@ -523,11 +497,19 @@ static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
*
* @wusbhc shall be referenced and unlocked
*/
-static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr)
{
+ struct wusb_dev *wusb_dev;
+
mutex_lock(&wusbhc->mutex);
- wusb_dev->entry_ts = jiffies;
- __wusbhc_keep_alive(wusbhc);
+ wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
+ if (wusb_dev == NULL) {
+ dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n",
+ srcaddr);
+ } else {
+ wusb_dev->entry_ts = jiffies;
+ __wusbhc_keep_alive(wusbhc);
+ }
mutex_unlock(&wusbhc->mutex);
}
@@ -581,14 +563,22 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
*
* @wusbhc shall be referenced and unlocked
*/
-static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr)
{
struct device *dev = wusbhc->dev;
-
- dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr);
+ struct wusb_dev *wusb_dev;
mutex_lock(&wusbhc->mutex);
- __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx));
+ wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
+ if (wusb_dev == NULL) {
+ dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n",
+ srcaddr);
+ } else {
+ dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n",
+ wusb_dev->addr);
+ __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc,
+ wusb_dev->port_idx));
+ }
mutex_unlock(&wusbhc->mutex);
}
@@ -610,30 +600,21 @@ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
struct wusb_dn_hdr *dn_hdr, size_t size)
{
struct device *dev = wusbhc->dev;
- struct wusb_dev *wusb_dev;
if (size < sizeof(struct wusb_dn_hdr)) {
dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
(int)size, (int)sizeof(struct wusb_dn_hdr));
return;
}
-
- wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
- if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
- dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
- dn_hdr->bType, srcaddr);
- return;
- }
-
switch (dn_hdr->bType) {
case WUSB_DN_CONNECT:
wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
break;
case WUSB_DN_ALIVE:
- wusbhc_handle_dn_alive(wusbhc, wusb_dev);
+ wusbhc_handle_dn_alive(wusbhc, srcaddr);
break;
case WUSB_DN_DISCONNECT:
- wusbhc_handle_dn_disconnect(wusbhc, wusb_dev);
+ wusbhc_handle_dn_disconnect(wusbhc, srcaddr);
break;
case WUSB_DN_MASAVAILCHANGED:
case WUSB_DN_RWAKE:
@@ -846,19 +827,6 @@ static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev)
wusb_dev->wusb_cap_descr = NULL;
};
-static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
- .bLength = sizeof(wusb_cap_descr_default),
- .bDescriptorType = USB_DT_DEVICE_CAPABILITY,
- .bDevCapabilityType = USB_CAP_TYPE_WIRELESS_USB,
-
- .bmAttributes = USB_WIRELESS_BEACON_NONE,
- .wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53),
- .bmTFITXPowerInfo = 0,
- .bmFFITXPowerInfo = 0,
- .bmBandGroup = cpu_to_le16(0x0001), /* WUSB1.0[7.4.1] bottom */
- .bReserved = 0
-};
-
/*
* USB stack's device addition Notifier Callback
*
@@ -985,7 +953,7 @@ int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
default:
WARN_ON(1);
result = NOTIFY_BAD;
- };
+ }
return result;
}
@@ -1074,7 +1042,7 @@ int wusbhc_devconnect_start(struct wusbhc *wusbhc)
wusbhc->wuie_host_info = hi;
queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
- (wusbhc->trust_timeout*CONFIG_HZ)/1000/2);
+ msecs_to_jiffies(wusbhc->trust_timeout / 2));
return 0;
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 0a57ff0a0b0..3f485df9622 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -38,6 +38,7 @@
*/
#include <linux/usb/wusb.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "wusbhc.h"
/* Initialize the MMCIEs handling mechanism */
@@ -194,6 +195,7 @@ int wusbhc_start(struct wusbhc *wusbhc)
struct device *dev = wusbhc->dev;
WARN_ON(wusbhc->wuie_host_info != NULL);
+ BUG_ON(wusbhc->uwb_rc == NULL);
result = wusbhc_rsv_establish(wusbhc);
if (result < 0) {
@@ -204,18 +206,20 @@ int wusbhc_start(struct wusbhc *wusbhc)
result = wusbhc_devconnect_start(wusbhc);
if (result < 0) {
- dev_err(dev, "error enabling device connections: %d\n", result);
+ dev_err(dev, "error enabling device connections: %d\n",
+ result);
goto error_devconnect_start;
}
result = wusbhc_sec_start(wusbhc);
if (result < 0) {
- dev_err(dev, "error starting security in the HC: %d\n", result);
+ dev_err(dev, "error starting security in the HC: %d\n",
+ result);
goto error_sec_start;
}
- /* FIXME: the choice of the DNTS parameters is somewhat
- * arbitrary */
- result = wusbhc->set_num_dnts(wusbhc, 0, 15);
+
+ result = wusbhc->set_num_dnts(wusbhc, wusbhc->dnts_interval,
+ wusbhc->dnts_num_slots);
if (result < 0) {
dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
goto error_set_num_dnts;
@@ -275,12 +279,39 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
}
wusbhc->chid = *chid;
}
+
+ /* register with UWB if we haven't already since we are about to start
+ the radio. */
+ if ((chid) && (wusbhc->uwb_rc == NULL)) {
+ wusbhc->uwb_rc = uwb_rc_get_by_grandpa(wusbhc->dev->parent);
+ if (wusbhc->uwb_rc == NULL) {
+ result = -ENODEV;
+ dev_err(wusbhc->dev,
+ "Cannot get associated UWB Host Controller\n");
+ goto error_rc_get;
+ }
+
+ result = wusbhc_pal_register(wusbhc);
+ if (result < 0) {
+ dev_err(wusbhc->dev, "Cannot register as a UWB PAL\n");
+ goto error_pal_register;
+ }
+ }
mutex_unlock(&wusbhc->mutex);
if (chid)
result = uwb_radio_start(&wusbhc->pal);
- else
+ else if (wusbhc->uwb_rc)
uwb_radio_stop(&wusbhc->pal);
+
+ return result;
+
+error_pal_register:
+ uwb_rc_put(wusbhc->uwb_rc);
+ wusbhc->uwb_rc = NULL;
+error_rc_get:
+ mutex_unlock(&wusbhc->mutex);
+
return result;
}
EXPORT_SYMBOL_GPL(wusbhc_chid_set);
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c
index d0b172c5ecc..090f27371a8 100644
--- a/drivers/usb/wusbcore/pal.c
+++ b/drivers/usb/wusbcore/pal.c
@@ -22,6 +22,7 @@ static void wusbhc_channel_changed(struct uwb_pal *pal, int channel)
{
struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal);
+ dev_dbg(wusbhc->dev, "%s: channel = %d\n", __func__, channel);
if (channel < 0)
wusbhc_stop(wusbhc);
else
@@ -45,10 +46,11 @@ int wusbhc_pal_register(struct wusbhc *wusbhc)
}
/**
- * wusbhc_pal_register - unregister the WUSB HC as a UWB PAL
+ * wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL
* @wusbhc: the WUSB HC
*/
void wusbhc_pal_unregister(struct wusbhc *wusbhc)
{
- uwb_pal_unregister(&wusbhc->pal);
+ if (wusbhc->uwb_rc)
+ uwb_pal_unregister(&wusbhc->pal);
}
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
index 4ed97360c04..d5efd0f07d2 100644
--- a/drivers/usb/wusbcore/reservation.c
+++ b/drivers/usb/wusbcore/reservation.c
@@ -51,6 +51,7 @@ static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
struct uwb_mas_bm mas;
char buf[72];
+ dev_dbg(dev, "%s: state = %d\n", __func__, rsv->state);
switch (rsv->state) {
case UWB_RSV_STATE_O_ESTABLISHED:
uwb_rsv_get_usable_mas(rsv, &mas);
@@ -71,7 +72,7 @@ static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
/**
* wusbhc_rsv_establish - establish a reservation for the cluster
- * @wusbhc: the WUSB HC requesting a bandwith reservation
+ * @wusbhc: the WUSB HC requesting a bandwidth reservation
*/
int wusbhc_rsv_establish(struct wusbhc *wusbhc)
{
@@ -80,6 +81,9 @@ int wusbhc_rsv_establish(struct wusbhc *wusbhc)
struct uwb_dev_addr bcid;
int ret;
+ if (rc == NULL)
+ return -ENODEV;
+
rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc);
if (rsv == NULL)
return -ENOMEM;
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index a68ad7aa0b5..fe8bc777ab8 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -70,6 +70,7 @@
* wusbhc_rh_start_port_reset() ??? unimplemented
*/
#include <linux/slab.h>
+#include <linux/export.h>
#include "wusbhc.h"
/*
@@ -133,30 +134,38 @@ static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
* big of a problem [and we can't make it an spinlock
* because other parts need to take it and sleep] .
*
- * @usb_hcd is refcounted, so it won't dissapear under us
+ * @usb_hcd is refcounted, so it won't disappear under us
* and before killing a host, the polling of the root hub
* would be stopped anyway.
*/
int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- size_t cnt, size;
- unsigned long *buf = (unsigned long *) _buf;
+ size_t cnt, size, bits_set = 0;
/* WE DON'T LOCK, see comment */
- size = wusbhc->ports_max + 1 /* hub bit */;
- size = (size + 8 - 1) / 8; /* round to bytes */
- for (cnt = 0; cnt < wusbhc->ports_max; cnt++)
- if (wusb_port_by_idx(wusbhc, cnt)->change)
- set_bit(cnt + 1, buf);
- else
- clear_bit(cnt + 1, buf);
- return size;
+ /* round up to bytes. Hub bit is bit 0 so add 1. */
+ size = DIV_ROUND_UP(wusbhc->ports_max + 1, 8);
+
+ /* clear the output buffer. */
+ memset(_buf, 0, size);
+ /* set the bit for each changed port. */
+ for (cnt = 0; cnt < wusbhc->ports_max; cnt++) {
+
+ if (wusb_port_by_idx(wusbhc, cnt)->change) {
+ const int bitpos = cnt+1;
+
+ _buf[bitpos/8] |= (1 << (bitpos % 8));
+ bits_set++;
+ }
+ }
+
+ return bits_set ? size : 0;
}
EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
/*
- * Return the hub's desciptor
+ * Return the hub's descriptor
*
* NOTE: almost cut and paste from ehci-hub.c
*
@@ -184,8 +193,8 @@ static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
descr->bPwrOn2PwrGood = 0;
descr->bHubContrCurrent = 0;
/* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset(&descr->bitmap[0], 0, temp);
- memset(&descr->bitmap[temp], 0xff, temp);
+ memset(&descr->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&descr->u.hs.DeviceRemovable[temp], 0xff, temp);
return 0;
}
@@ -392,26 +401,6 @@ int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue,
}
EXPORT_SYMBOL_GPL(wusbhc_rh_control);
-int wusbhc_rh_suspend(struct usb_hcd *usb_hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
- usb_hcd, wusbhc);
- /* dump_stack(); */
- return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(wusbhc_rh_suspend);
-
-int wusbhc_rh_resume(struct usb_hcd *usb_hcd)
-{
- struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
- usb_hcd, wusbhc);
- /* dump_stack(); */
- return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(wusbhc_rh_resume);
-
int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index b60799b811c..95be9953cd4 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -26,21 +26,20 @@
#include <linux/slab.h>
#include <linux/usb/ch9.h>
#include <linux/random.h>
+#include <linux/export.h>
#include "wusbhc.h"
-static void wusbhc_set_gtk_callback(struct urb *urb);
-static void wusbhc_gtk_rekey_done_work(struct work_struct *work);
+static void wusbhc_gtk_rekey_work(struct work_struct *work);
int wusbhc_sec_create(struct wusbhc *wusbhc)
{
- wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
+ wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) +
+ sizeof(wusbhc->gtk.data);
wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
wusbhc->gtk.descr.bReserved = 0;
+ wusbhc->gtk_index = 0;
- wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
- WUSB_KEY_INDEX_ORIGINATOR_HOST);
-
- INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work);
+ INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work);
return 0;
}
@@ -58,7 +57,7 @@ void wusbhc_sec_destroy(struct wusbhc *wusbhc)
* @wusb_dev: the device whose PTK the TKID is for
* (or NULL for a TKID for a GTK)
*
- * The generated TKID consist of two parts: the device's authenicated
+ * The generated TKID consists of two parts: the device's authenticated
* address (or 0 or a GTK); and an incrementing number. This ensures
* that TKIDs cannot be shared between devices and by the time the
* incrementing number wraps around the older TKIDs will no longer be
@@ -112,7 +111,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)
wusbhc_generate_gtk(wusbhc);
result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
- &wusbhc->gtk.descr.bKeyData, key_size);
+ &wusbhc->gtk.descr.bKeyData, key_size);
if (result < 0)
dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
result);
@@ -128,7 +127,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)
*/
void wusbhc_sec_stop(struct wusbhc *wusbhc)
{
- cancel_work_sync(&wusbhc->gtk_rekey_done_work);
+ cancel_work_sync(&wusbhc->gtk_rekey_work);
}
@@ -140,7 +139,7 @@ const char *wusb_et_name(u8 x)
case USB_ENC_TYPE_WIRED: return "wired";
case USB_ENC_TYPE_CCM_1: return "CCM-1";
case USB_ENC_TYPE_RSA_1: return "RSA-1";
- default: return "unknown";
+ default: return "unknown";
}
}
EXPORT_SYMBOL_GPL(wusb_et_name);
@@ -167,7 +166,7 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- value, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+ value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Can't set device's WUSB encryption to "
"%s (value %d): %d\n",
@@ -184,14 +183,16 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
struct usb_device *usb_dev = wusb_dev->usb_dev;
+ u8 key_index = wusb_key_index(wusbhc->gtk_index,
+ WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST);
return usb_control_msg(
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_DESCRIPTOR,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- USB_DT_KEY << 8 | wusbhc->gtk_index, 0,
+ USB_DT_KEY << 8 | key_index, 0,
&wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
- 1000);
+ USB_CTRL_SET_TIMEOUT);
}
@@ -201,7 +202,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
{
int result, bytes, secd_size;
struct device *dev = &usb_dev->dev;
- struct usb_security_descriptor *secd;
+ struct usb_security_descriptor *secd, *new_secd;
const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
const void *itr, *top;
char buf[64];
@@ -220,11 +221,13 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
goto out;
}
secd_size = le16_to_cpu(secd->wTotalLength);
- secd = krealloc(secd, secd_size, GFP_KERNEL);
- if (secd == NULL) {
- dev_err(dev, "Can't allocate space for security descriptors\n");
+ new_secd = krealloc(secd, secd_size, GFP_KERNEL);
+ if (new_secd == NULL) {
+ dev_err(dev,
+ "Can't allocate space for security descriptors\n");
goto out;
}
+ secd = new_secd;
result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
0, secd, secd_size);
if (result < secd_size) {
@@ -300,8 +303,9 @@ int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
/* Set address 0 */
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_ADDRESS, 0,
- 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+ USB_REQ_SET_ADDRESS,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "auth failed: can't set address 0: %d\n",
result);
@@ -315,9 +319,10 @@ int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
/* Set new (authenticated) address. */
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_ADDRESS, 0,
- new_address, 0, NULL, 0,
- 1000 /* FIXME: arbitrary */);
+ USB_REQ_SET_ADDRESS,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ new_address, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "auth failed: can't set address %u: %d\n",
new_address, result);
@@ -353,7 +358,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
struct wusb_keydvt_in keydvt_in;
struct wusb_keydvt_out keydvt_out;
- hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL);
+ hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL);
if (hs == NULL) {
dev_err(dev, "can't allocate handshake data\n");
goto error_kzalloc;
@@ -374,13 +379,13 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
hs[0].bReserved = 0;
memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
- memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
+ memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
result = usb_control_msg(
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_HANDSHAKE,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */);
+ 1, 0, &hs[0], sizeof(hs[0]), USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake1: request failed: %d\n", result);
goto error_hs1;
@@ -391,7 +396,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
usb_dev, usb_rcvctrlpipe(usb_dev, 0),
USB_REQ_GET_HANDSHAKE,
USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */);
+ 2, 0, &hs[1], sizeof(hs[1]), USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake2: request failed: %d\n", result);
goto error_hs2;
@@ -421,7 +426,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
}
/* Setup the CCM nonce */
- memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
+ memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
ccm_n.dest_addr.data[0] = wusb_dev->addr;
@@ -468,7 +473,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_HANDSHAKE,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */);
+ 3, 0, &hs[2], sizeof(hs[2]), USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake3: request failed: %d\n", result);
goto error_hs3;
@@ -518,24 +523,57 @@ error_kzalloc:
* Once all connected and authenticated devices have received the new
* GTK, switch the host to using it.
*/
-static void wusbhc_gtk_rekey_done_work(struct work_struct *work)
+static void wusbhc_gtk_rekey_work(struct work_struct *work)
{
- struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work);
+ struct wusbhc *wusbhc = container_of(work,
+ struct wusbhc, gtk_rekey_work);
size_t key_size = sizeof(wusbhc->gtk.data);
+ int port_idx;
+ struct wusb_dev *wusb_dev, *wusb_dev_next;
+ LIST_HEAD(rekey_list);
mutex_lock(&wusbhc->mutex);
+ /* generate the new key */
+ wusbhc_generate_gtk(wusbhc);
+ /* roll the gtk index. */
+ wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1);
+ /*
+ * Save all connected devices on a list while holding wusbhc->mutex and
+ * take a reference to each one. Then submit the set key request to
+ * them after releasing the lock in order to avoid a deadlock.
+ */
+ for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) {
+ wusb_dev = wusbhc->port[port_idx].wusb_dev;
+ if (!wusb_dev || !wusb_dev->usb_dev
+ || !wusb_dev->usb_dev->authenticated)
+ continue;
- if (--wusbhc->pending_set_gtks == 0)
- wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
-
+ wusb_dev_get(wusb_dev);
+ list_add_tail(&wusb_dev->rekey_node, &rekey_list);
+ }
mutex_unlock(&wusbhc->mutex);
-}
-static void wusbhc_set_gtk_callback(struct urb *urb)
-{
- struct wusbhc *wusbhc = urb->context;
+ /* Submit the rekey requests without holding wusbhc->mutex. */
+ list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list,
+ rekey_node) {
+ list_del_init(&wusb_dev->rekey_node);
+ dev_dbg(&wusb_dev->usb_dev->dev,
+ "%s: rekey device at port %d\n",
+ __func__, wusb_dev->port_idx);
+
+ if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) {
+ dev_err(&wusb_dev->usb_dev->dev,
+ "%s: rekey device at port %d failed\n",
+ __func__, wusb_dev->port_idx);
+ }
+ wusb_dev_put(wusb_dev);
+ }
- queue_work(wusbd, &wusbhc->gtk_rekey_done_work);
+ /* Switch the host controller to use the new GTK. */
+ mutex_lock(&wusbhc->mutex);
+ wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
+ &wusbhc->gtk.descr.bKeyData, key_size);
+ mutex_unlock(&wusbhc->mutex);
}
/**
@@ -551,26 +589,12 @@ static void wusbhc_set_gtk_callback(struct urb *urb)
*/
void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
{
- static const size_t key_size = sizeof(wusbhc->gtk.data);
- int p;
-
- wusbhc_generate_gtk(wusbhc);
-
- for (p = 0; p < wusbhc->ports_max; p++) {
- struct wusb_dev *wusb_dev;
-
- wusb_dev = wusbhc->port[p].wusb_dev;
- if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated)
- continue;
-
- usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev,
- usb_sndctrlpipe(wusb_dev->usb_dev, 0),
- (void *)wusb_dev->set_gtk_req,
- &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
- wusbhc_set_gtk_callback, wusbhc);
- if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0)
- wusbhc->pending_set_gtks++;
- }
- if (wusbhc->pending_set_gtks == 0)
- wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
+ /*
+ * We need to submit a URB to the downstream WUSB devices in order to
+ * change the group key. This can't be done while holding the
+ * wusbhc->mutex since that is also taken in the urb_enqueue routine
+ * and will cause a deadlock. Instead, queue a work item to do
+ * it when the lock is not held
+ */
+ queue_work(wusbd, &wusbhc->gtk_rekey_work);
}
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 59a748a0e5d..252c7bd9218 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -23,6 +23,7 @@
* FIXME: docs
*/
#include <linux/slab.h>
+#include <linux/module.h>
#include "wusbhc.h"
#include "wa-hc.h"
@@ -32,7 +33,8 @@
* wa->usb_dev and wa->usb_iface initialized and refcounted,
* wa->wa_descr initialized.
*/
-int wa_create(struct wahc *wa, struct usb_interface *iface)
+int wa_create(struct wahc *wa, struct usb_interface *iface,
+ kernel_ulong_t quirks)
{
int result;
struct device *dev = &iface->dev;
@@ -40,13 +42,16 @@ int wa_create(struct wahc *wa, struct usb_interface *iface)
result = wa_rpipes_create(wa);
if (result < 0)
goto error_rpipes_create;
+ wa->quirks = quirks;
/* Fill up Data Transfer EP pointers */
wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
- wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize);
- wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL);
- if (wa->xfer_result == NULL)
- goto error_xfer_result_alloc;
+ wa->dti_buf_size = usb_endpoint_maxp(wa->dti_epd);
+ wa->dti_buf = kmalloc(wa->dti_buf_size, GFP_KERNEL);
+ if (wa->dti_buf == NULL) {
+ result = -ENOMEM;
+ goto error_dti_buf_alloc;
+ }
result = wa_nep_create(wa, iface);
if (result < 0) {
dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
@@ -56,8 +61,8 @@ int wa_create(struct wahc *wa, struct usb_interface *iface)
return 0;
error_nep_create:
- kfree(wa->xfer_result);
-error_xfer_result_alloc:
+ kfree(wa->dti_buf);
+error_dti_buf_alloc:
wa_rpipes_destroy(wa);
error_rpipes_create:
return result;
@@ -70,10 +75,8 @@ void __wa_destroy(struct wahc *wa)
if (wa->dti_urb) {
usb_kill_urb(wa->dti_urb);
usb_put_urb(wa->dti_urb);
- usb_kill_urb(wa->buf_in_urb);
- usb_put_urb(wa->buf_in_urb);
}
- kfree(wa->xfer_result);
+ kfree(wa->dti_buf);
wa_nep_destroy(wa);
wa_rpipes_destroy(wa);
}
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index d6bea3e0b54..f2a8d29e17b 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -36,7 +36,7 @@
*
* hcd glue with the USB API Host Controller Interface API.
*
- * nep Notification EndPoint managent: collect notifications
+ * nep Notification EndPoint management: collect notifications
* and queue them with the workqueue daemon.
*
* Handle notifications as coming from the NEP. Sends them
@@ -91,6 +91,7 @@
struct wusbhc;
struct wahc;
extern void wa_urb_enqueue_run(struct work_struct *ws);
+extern void wa_process_errored_transfers_run(struct work_struct *ws);
/**
* RPipe instance
@@ -116,11 +117,38 @@ struct wa_rpipe {
struct wahc *wa;
spinlock_t seg_lock;
struct list_head seg_list;
+ struct list_head list_node;
atomic_t segs_available;
u8 buffer[1]; /* For reads/writes on USB */
};
+enum wa_dti_state {
+ WA_DTI_TRANSFER_RESULT_PENDING,
+ WA_DTI_ISOC_PACKET_STATUS_PENDING,
+ WA_DTI_BUF_IN_DATA_PENDING
+};
+
+enum wa_quirks {
+ /*
+ * The Alereon HWA expects the data frames in isochronous transfer
+ * requests to be concatenated and not sent as separate packets.
+ */
+ WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC = 0x01,
+ /*
+ * The Alereon HWA can be instructed to not send transfer notifications
+ * as an optimization.
+ */
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS = 0x02,
+};
+
+enum wa_vendor_specific_requests {
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C,
+ WA_REQ_ALEREON_FEATURE_SET = 0x01,
+ WA_REQ_ALEREON_FEATURE_CLEAR = 0x00,
+};
+
+#define WA_MAX_BUF_IN_URBS 4
/**
* Instance of a HWA Host Controller
*
@@ -129,7 +157,7 @@ struct wa_rpipe {
*
* @wa_descr Can be accessed without locking because it is in
* the same area where the device descriptors were
- * read, so it is guaranteed to exist umodified while
+ * read, so it is guaranteed to exist unmodified while
* the device exists.
*
* Endianess has been converted to CPU's.
@@ -152,8 +180,8 @@ struct wa_rpipe {
* submitted from an atomic context).
*
* FIXME: this needs to be layered up: a wusbhc layer (for sharing
- * comonalities with WHCI), a wa layer (for sharing
- * comonalities with DWA-RC).
+ * commonalities with WHCI), a wa layer (for sharing
+ * commonalities with DWA-RC).
*/
struct wahc {
struct usb_device *usb_dev;
@@ -177,27 +205,51 @@ struct wahc {
u16 rpipes;
unsigned long *rpipe_bm; /* rpipe usage bitmap */
- spinlock_t rpipe_bm_lock; /* protect rpipe_bm */
+ struct list_head rpipe_delayed_list; /* delayed RPIPES. */
+ spinlock_t rpipe_lock; /* protect rpipe_bm and delayed list */
struct mutex rpipe_mutex; /* assigning resources to endpoints */
+ /*
+ * dti_state is used to track the state of the dti_urb. When dti_state
+ * is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and
+ * dti_isoc_xfer_seg identify which xfer the incoming isoc packet
+ * status refers to.
+ */
+ enum wa_dti_state dti_state;
+ u32 dti_isoc_xfer_in_progress;
+ u8 dti_isoc_xfer_seg;
struct urb *dti_urb; /* URB for reading xfer results */
- struct urb *buf_in_urb; /* URB for reading data in */
+ /* URBs for reading data in */
+ struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS];
+ int active_buf_in_urbs; /* number of buf_in_urbs active. */
struct edc dti_edc; /* DTI error density counter */
- struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */
- size_t xfer_result_size;
+ void *dti_buf;
+ size_t dti_buf_size;
+
+ unsigned long dto_in_use; /* protect dto endoint serialization */
s32 status; /* For reading status */
struct list_head xfer_list;
struct list_head xfer_delayed_list;
+ struct list_head xfer_errored_list;
+ /*
+ * lock for the above xfer lists. Can be taken while a xfer->lock is
+ * held but not in the reverse order.
+ */
spinlock_t xfer_list_lock;
- struct work_struct xfer_work;
+ struct work_struct xfer_enqueue_work;
+ struct work_struct xfer_error_work;
atomic_t xfer_id_count;
+
+ kernel_ulong_t quirks;
};
-extern int wa_create(struct wahc *wa, struct usb_interface *iface);
+extern int wa_create(struct wahc *wa, struct usb_interface *iface,
+ kernel_ulong_t);
extern void __wa_destroy(struct wahc *wa);
+extern int wa_dti_start(struct wahc *wa);
void wa_reset_all(struct wahc *wa);
@@ -232,21 +284,32 @@ static inline void wa_nep_disarm(struct wahc *wa)
/* RPipes */
static inline void wa_rpipe_init(struct wahc *wa)
{
- spin_lock_init(&wa->rpipe_bm_lock);
+ INIT_LIST_HEAD(&wa->rpipe_delayed_list);
+ spin_lock_init(&wa->rpipe_lock);
mutex_init(&wa->rpipe_mutex);
}
static inline void wa_init(struct wahc *wa)
{
+ int index;
+
edc_init(&wa->nep_edc);
atomic_set(&wa->notifs_queued, 0);
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
wa_rpipe_init(wa);
edc_init(&wa->dti_edc);
INIT_LIST_HEAD(&wa->xfer_list);
INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ INIT_LIST_HEAD(&wa->xfer_errored_list);
spin_lock_init(&wa->xfer_list_lock);
- INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+ INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
+ INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
+ wa->dto_in_use = 0;
atomic_set(&wa->xfer_id_count, 1);
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
+ usb_init_urb(&(wa->buf_in_urbs[index]));
+ wa->active_buf_in_urbs = 0;
}
/**
@@ -269,6 +332,8 @@ static inline void rpipe_put(struct wa_rpipe *rpipe)
}
extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
+extern void rpipe_clear_feature_stalled(struct wahc *,
+ struct usb_host_endpoint *);
extern int wa_rpipes_create(struct wahc *);
extern void wa_rpipes_destroy(struct wahc *);
static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
@@ -289,7 +354,7 @@ static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
/* Transferring data */
extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
struct urb *, gfp_t);
-extern int wa_urb_dequeue(struct wahc *, struct urb *);
+extern int wa_urb_dequeue(struct wahc *, struct urb *, int);
extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
@@ -302,7 +367,7 @@ extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
* it...no RC specific function is called...unless I miss
* something.
*
- * FIXME: has to go away in favour of an 'struct' hcd based sollution
+ * FIXME: has to go away in favour of a 'struct' hcd based solution
*/
static inline struct wahc *wa_get(struct wahc *wa)
{
@@ -323,7 +388,7 @@ static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
feature,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
}
@@ -357,8 +422,7 @@ s32 __wa_get_status(struct wahc *wa)
USB_REQ_GET_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- &wa->status, sizeof(wa->status),
- 1000 /* FIXME: arbitrary */);
+ &wa->status, sizeof(wa->status), USB_CTRL_GET_TIMEOUT);
if (result >= 0)
result = wa->status;
return result;
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
index f67f7f1e6df..60a10d21947 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -69,8 +69,8 @@ struct wa_notif_work {
* [the wuswad daemon, basically]
*
* @_nw: Pointer to a descriptor which has the pointer to the
- * @wa, the size of the buffer and the work queue
- * structure (so we can free all when done).
+ * @wa, the size of the buffer and the work queue
+ * structure (so we can free all when done).
* @returns 0 if ok, < 0 errno code on error.
*
* All notifications follow the same format; they need to start with a
@@ -93,7 +93,8 @@ static void wa_notif_dispatch(struct work_struct *ws)
{
void *itr;
u8 missing = 0;
- struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work);
+ struct wa_notif_work *nw = container_of(ws, struct wa_notif_work,
+ work);
struct wahc *wa = nw->wa;
struct wa_notif_hdr *notif_hdr;
size_t size;
@@ -134,9 +135,10 @@ static void wa_notif_dispatch(struct work_struct *ws)
case WA_NOTIF_TRANSFER:
wa_handle_notif_xfer(wa, notif_hdr);
break;
+ case HWA_NOTIF_BPST_ADJ:
+ break; /* no action needed for BPST ADJ. */
case DWA_NOTIF_RWAKE:
case DWA_NOTIF_PORTSTATUS:
- case HWA_NOTIF_BPST_ADJ:
/* FIXME: unimplemented WA NOTIFs */
/* fallthru */
default:
@@ -270,7 +272,8 @@ int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
wa->nep_buffer_size = 1024;
wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
if (wa->nep_buffer == NULL) {
- dev_err(dev, "Unable to allocate notification's read buffer\n");
+ dev_err(dev,
+ "Unable to allocate notification's read buffer\n");
goto error_nep_buffer;
}
wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index c7b1d8108de..a80c5d284b5 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -24,7 +24,7 @@
*
* RPIPE
*
- * Targetted at different downstream endpoints
+ * Targeted at different downstream endpoints
*
* Descriptor: use to config the remote pipe.
*
@@ -49,7 +49,7 @@
*
* USB Stack port number 4 (1 based)
* WUSB code port index 3 (0 based)
- * USB Addresss 5 (2 based -- 0 is for default, 1 for root hub)
+ * USB Address 5 (2 based -- 0 is for default, 1 for root hub)
*
* Now, because we don't use the concept as default address exactly
* like the (wired) USB code does, we need to kind of skip it. So we
@@ -57,10 +57,10 @@
* urb->dev->devnum, to make sure that we always have the right
* destination address.
*/
-#include <linux/init.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "wusbhc.h"
#include "wa-hc.h"
@@ -79,7 +79,7 @@ static int __rpipe_get_descr(struct wahc *wa,
USB_REQ_GET_DESCRIPTOR,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
- 1000 /* FIXME: arbitrary */);
+ USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
index, (int)result);
@@ -117,7 +117,7 @@ static int __rpipe_set_descr(struct wahc *wa,
USB_REQ_SET_DESCRIPTOR,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
- HZ / 10);
+ USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
index, (int)result);
@@ -142,17 +142,18 @@ static void rpipe_init(struct wa_rpipe *rpipe)
kref_init(&rpipe->refcnt);
spin_lock_init(&rpipe->seg_lock);
INIT_LIST_HEAD(&rpipe->seg_list);
+ INIT_LIST_HEAD(&rpipe->list_node);
}
static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
{
unsigned long flags;
- spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
if (rpipe_idx < wa->rpipes)
set_bit(rpipe_idx, wa->rpipe_bm);
- spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
return rpipe_idx;
}
@@ -161,9 +162,9 @@ static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
{
unsigned long flags;
- spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
clear_bit(rpipe_idx, wa->rpipe_bm);
- spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}
void rpipe_destroy(struct kref *_rpipe)
@@ -182,7 +183,7 @@ EXPORT_SYMBOL_GPL(rpipe_destroy);
/*
* Locate an idle rpipe, create an structure for it and return it
*
- * @wa is referenced and unlocked
+ * @wa is referenced and unlocked
* @crs enum rpipe_attr, required endpoint characteristics
*
* The rpipe can be used only sequentially (not in parallel).
@@ -235,7 +236,7 @@ static int __rpipe_reset(struct wahc *wa, unsigned index)
wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_RESET,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+ 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "rpipe %u: reset failed: %d\n",
index, result);
@@ -250,8 +251,8 @@ static int __rpipe_reset(struct wahc *wa, unsigned index)
static struct usb_wireless_ep_comp_descriptor epc0 = {
.bLength = sizeof(epc0),
.bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
-/* .bMaxBurst = 1, */
- .bMaxSequence = 31,
+ .bMaxBurst = 1,
+ .bMaxSequence = 2,
};
/*
@@ -297,7 +298,7 @@ static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
break;
}
itr += hdr->bLength;
- itr_size -= hdr->bDescriptorType;
+ itr_size -= hdr->bLength;
}
out:
return epcd;
@@ -306,7 +307,7 @@ out:
/*
* Aim an rpipe to its device & endpoint destination
*
- * Make sure we change the address to unauthenticathed if the device
+ * Make sure we change the address to unauthenticated if the device
* is WUSB and it is not authenticated.
*/
static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
@@ -316,6 +317,7 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
struct device *dev = &wa->usb_iface->dev;
struct usb_device *usb_dev = urb->dev;
struct usb_wireless_ep_comp_descriptor *epcd;
+ u32 ack_window, epcd_max_sequence;
u8 unauth;
epcd = rpipe_epc_find(dev, ep);
@@ -326,14 +328,21 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
}
unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
__rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
- atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
+ atomic_set(&rpipe->segs_available,
+ le16_to_cpu(rpipe->descr.wRequests));
/* FIXME: block allocation system; request with queuing and timeout */
/* FIXME: compute so seg_size > ep->maxpktsize */
rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
/* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
- rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
- rpipe->descr.bHSHubAddress = 0; /* reserved: zero */
- rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize;
+ else
+ rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;
+
+ rpipe->descr.hwa_bMaxBurst = max(min_t(unsigned int,
+ epcd->bMaxBurst, 16U), 1U);
+ rpipe->descr.hwa_bDeviceInfoIndex =
+ wusb_port_no_to_idx(urb->dev->portnum);
/* FIXME: use maximum speed as supported or recommended by device */
rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
@@ -343,26 +352,28 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
le16_to_cpu(rpipe->descr.wRPipeIndex),
usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
- /* see security.c:wusb_update_address() */
- if (unlikely(urb->dev->devnum == 0x80))
- rpipe->descr.bDeviceAddress = 0;
- else
- rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
+ rpipe->descr.hwa_reserved = 0;
+
rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
/* FIXME: bDataSequence */
rpipe->descr.bDataSequence = 0;
- /* FIXME: dwCurrentWindow */
- rpipe->descr.dwCurrentWindow = cpu_to_le32(1);
- /* FIXME: bMaxDataSequence */
- rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1;
+
+ /* start with base window of hwa_bMaxBurst bits starting at 0. */
+ ack_window = 0xFFFFFFFF >> (32 - rpipe->descr.hwa_bMaxBurst);
+ rpipe->descr.dwCurrentWindow = cpu_to_le32(ack_window);
+ epcd_max_sequence = max(min_t(unsigned int,
+ epcd->bMaxSequence, 32U), 2U);
+ rpipe->descr.bMaxDataSequence = epcd_max_sequence - 1;
rpipe->descr.bInterval = ep->desc.bInterval;
- /* FIXME: bOverTheAirInterval */
- rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ rpipe->descr.bOverTheAirInterval = epcd->bOverTheAirInterval;
+ else
+ rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
/* FIXME: xmit power & preamble blah blah */
- rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03;
+ rpipe->descr.bmAttribute = (ep->desc.bmAttributes &
+ USB_ENDPOINT_XFERTYPE_MASK);
/* rpipe->descr.bmCharacteristics RO */
- /* FIXME: bmRetryOptions */
- rpipe->descr.bmRetryOptions = 15;
+ rpipe->descr.bmRetryOptions = (wa->wusb->retry_count & 0xF);
/* FIXME: use for assessing link quality? */
rpipe->descr.wNumTransactionErrors = 0;
result = __rpipe_set_descr(wa, &rpipe->descr,
@@ -386,10 +397,8 @@ static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
const struct usb_host_endpoint *ep,
const struct urb *urb, gfp_t gfp)
{
- int result = 0; /* better code for lack of companion? */
+ int result = 0;
struct device *dev = &wa->usb_iface->dev;
- struct usb_device *usb_dev = urb->dev;
- u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0;
u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
#define AIM_CHECK(rdf, val, text) \
@@ -402,13 +411,10 @@ static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
WARN_ON(1); \
} \
} while (0)
- AIM_CHECK(wMaxPacketSize, cpu_to_le16(ep->desc.wMaxPacketSize),
- "(%u vs %u)");
- AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)");
+ AIM_CHECK(hwa_bDeviceInfoIndex, portnum, "(%u vs %u)");
AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
"(%u vs %u)");
- AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)");
AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
@@ -477,7 +483,7 @@ error:
*/
int wa_rpipes_create(struct wahc *wa)
{
- wa->rpipes = wa->wa_descr->wNumRPipes;
+ wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);
wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),
GFP_KERNEL);
if (wa->rpipe_bm == NULL)
@@ -518,12 +524,32 @@ void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
usb_control_msg(
- wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_ABORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+ 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
rpipe_put(rpipe);
}
mutex_unlock(&wa->rpipe_mutex);
}
EXPORT_SYMBOL_GPL(rpipe_ep_disable);
+
+/* Clear the stalled status of an RPIPE. */
+void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+ struct wa_rpipe *rpipe;
+
+ mutex_lock(&wa->rpipe_mutex);
+ rpipe = ep->hcpriv;
+ if (rpipe != NULL) {
+ u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+ usb_control_msg(
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ RPIPE_STALL, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ }
+ mutex_unlock(&wa->rpipe_mutex);
+}
+EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 84b744c428a..3e2e4ed2015 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -61,7 +61,7 @@
*
* Two methods it could be done:
*
- * (a) set up a timer everytime an rpipe's use count drops to 1
+ * (a) set up a timer every time an rpipe's use count drops to 1
* (which means unused) or when a transfer ends. Reset the
* timer when a xfer is queued. If the timer expires, release
* the rpipe [see rpipe_ep_disable()].
@@ -79,16 +79,19 @@
* availability of the different required components (blocks,
* rpipes, segment slots, etc), we go scheduling them. Painful.
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
+#include <linux/ratelimit.h>
+#include <linux/export.h>
+#include <linux/scatterlist.h>
#include "wa-hc.h"
#include "wusbhc.h"
enum {
- WA_SEGS_MAX = 255,
+ /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
+ WA_SEGS_MAX = 128,
};
enum wa_seg_status {
@@ -104,6 +107,7 @@ enum wa_seg_status {
};
static void wa_xfer_delayed_run(struct wa_rpipe *);
+static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
/*
* Life cycle governed by 'struct urb' (the refcount of the struct is
@@ -111,21 +115,29 @@ static void wa_xfer_delayed_run(struct wa_rpipe *);
* struct).
*/
struct wa_seg {
- struct urb urb;
- struct urb *dto_urb; /* for data output? */
+ struct urb tr_urb; /* transfer request urb. */
+ struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
+ struct urb *dto_urb; /* for data output. */
struct list_head list_node; /* for rpipe->req_list */
struct wa_xfer *xfer; /* out xfer */
u8 index; /* which segment we are */
+ int isoc_frame_count; /* number of isoc frames in this segment. */
+ int isoc_frame_offset; /* starting frame offset in the xfer URB. */
+ /* Isoc frame that the current transfer buffer corresponds to. */
+ int isoc_frame_index;
+ int isoc_size; /* size of all isoc frames sent by this seg. */
enum wa_seg_status status;
ssize_t result; /* bytes xfered or error */
struct wa_xfer_hdr xfer_hdr;
- u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
};
-static void wa_seg_init(struct wa_seg *seg)
+static inline void wa_seg_init(struct wa_seg *seg)
{
- /* usb_init_urb() repeats a lot of work, so we do it here */
- kref_init(&seg->urb.kref);
+ usb_init_urb(&seg->tr_urb);
+
+ /* set the remaining memory to 0. */
+ memset(((void *)seg) + sizeof(seg->tr_urb), 0,
+ sizeof(*seg) - sizeof(seg->tr_urb));
}
/*
@@ -140,7 +152,7 @@ struct wa_xfer {
struct wahc *wa; /* Wire adapter we are plugged to */
struct usb_host_endpoint *ep;
- struct urb *urb; /* URB we are transfering for */
+ struct urb *urb; /* URB we are transferring for */
struct wa_seg **seg; /* transfer segments */
u8 segs, segs_submitted, segs_done;
unsigned is_inbound:1;
@@ -153,6 +165,11 @@ struct wa_xfer {
struct wusb_dev *wusb_dev; /* for activity timestamps */
};
+static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
+ struct wa_seg *seg, int curr_iso_frame);
+static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
+ int starting_index, enum wa_seg_status status);
+
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
kref_init(&xfer->refcnt);
@@ -161,10 +178,10 @@ static inline void wa_xfer_init(struct wa_xfer *xfer)
}
/*
- * Destory a transfer structure
+ * Destroy a transfer structure
*
- * Note that the xfer->seg[index] thingies follow the URB life cycle,
- * so we need to put them, not free them.
+ * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
+ * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
*/
static void wa_xfer_destroy(struct kref *_xfer)
{
@@ -172,10 +189,17 @@ static void wa_xfer_destroy(struct kref *_xfer)
if (xfer->seg) {
unsigned cnt;
for (cnt = 0; cnt < xfer->segs; cnt++) {
- if (xfer->is_inbound)
- usb_put_urb(xfer->seg[cnt]->dto_urb);
- usb_put_urb(&xfer->seg[cnt]->urb);
+ struct wa_seg *seg = xfer->seg[cnt];
+ if (seg) {
+ usb_free_urb(seg->isoc_pack_desc_urb);
+ if (seg->dto_urb) {
+ kfree(seg->dto_urb->sg);
+ usb_free_urb(seg->dto_urb);
+ }
+ usb_free_urb(&seg->tr_urb);
+ }
}
+ kfree(xfer->seg);
}
kfree(xfer);
}
@@ -191,6 +215,59 @@ static void wa_xfer_put(struct wa_xfer *xfer)
}
/*
+ * Try to get exclusive access to the DTO endpoint resource. Return true
+ * if successful.
+ */
+static inline int __wa_dto_try_get(struct wahc *wa)
+{
+ return (test_and_set_bit(0, &wa->dto_in_use) == 0);
+}
+
+/* Release the DTO endpoint resource. */
+static inline void __wa_dto_put(struct wahc *wa)
+{
+ clear_bit_unlock(0, &wa->dto_in_use);
+}
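__wa_dto_try_get()/__wa_dto_put() implement a non-blocking try-lock over a
single bit, so a submission path that loses the race can back off instead
of sleeping. A minimal user-space analogue using C11 atomics (a sketch;
the kernel uses test_and_set_bit()/clear_bit_unlock()):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_flag dto_in_use = ATOMIC_FLAG_INIT;

    /* Non-blocking acquire: true if we now own the DTO resource. */
    static bool dto_try_get(void)
    {
            /* test-and-set returns the previous value; false means we won. */
            return !atomic_flag_test_and_set_explicit(&dto_in_use,
                                                      memory_order_acquire);
    }

    /* Release with release semantics, like clear_bit_unlock(). */
    static void dto_put(void)
    {
            atomic_flag_clear_explicit(&dto_in_use, memory_order_release);
    }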
+
+/* Service RPIPEs that are waiting on the DTO resource. */
+static void wa_check_for_delayed_rpipes(struct wahc *wa)
+{
+ unsigned long flags;
+ int dto_waiting = 0;
+ struct wa_rpipe *rpipe;
+
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
+ while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
+ rpipe = list_first_entry(&wa->rpipe_delayed_list,
+ struct wa_rpipe, list_node);
+ __wa_xfer_delayed_run(rpipe, &dto_waiting);
+ /* remove this RPIPE from the list if it is not waiting. */
+ if (!dto_waiting) {
+ pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
+ __func__,
+ le16_to_cpu(rpipe->descr.wRPipeIndex));
+ list_del_init(&rpipe->list_node);
+ }
+ }
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+}
+
+/* add this RPIPE to the end of the delayed RPIPE list. */
+static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
+ /* add rpipe to the list if it is not already on it. */
+ if (list_empty(&rpipe->list_node)) {
+ pr_debug("%s: adding RPIPE %d to the delayed list.\n",
+ __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
+ list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
+ }
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+}
+
+/*
* xfer is referenced
*
* xfer->lock has to be unlocked
@@ -206,6 +283,7 @@ static void wa_xfer_giveback(struct wa_xfer *xfer)
spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
list_del_init(&xfer->list_node);
+ usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
/* FIXME: segmentation broken -- kills DWA */
wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
@@ -227,6 +305,31 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
}
/*
+ * Initialize a transfer's ID
+ *
+ * We need to use a sequential number; if we use the pointer or the
+ * hash of the pointer, it can repeat over sequential transfers and
+ * then it will confuse the HWA....wonder why in hell they put a 32
+ * bit handle in there then.
+ */
+static void wa_xfer_id_init(struct wa_xfer *xfer)
+{
+ xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
+}
+
+/* Return the xfer's ID. */
+static inline u32 wa_xfer_id(struct wa_xfer *xfer)
+{
+ return xfer->id;
+}
+
+/* Return the xfer's ID in transport format (little endian). */
+static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
+{
+ return cpu_to_le32(xfer->id);
+}
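The comment above explains why the handle must be a sequential counter
rather than something pointer-derived. A user-space analogue of the atomic
counter (a sketch; like the driver's xfer_id_count, it simply wraps at
2^32):

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_uint xfer_id_count;

    /* Like wa_xfer_id_init(): hand out the next 32-bit transfer ID. */
    static uint32_t next_xfer_id(void)
    {
            /* atomic_add_return(1, ...) analogue: fetch-then-add, plus one. */
            return atomic_fetch_add(&xfer_id_count, 1) + 1;
    }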
+
+/*
* If transfer is done, wrap it up and return true
*
* xfer->lock has to be locked
@@ -248,33 +351,37 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
switch (seg->status) {
case WA_SEG_DONE:
if (found_short && seg->result > 0) {
- dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
- xfer, cnt, seg->result);
+ dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
+ xfer, wa_xfer_id(xfer), cnt,
+ seg->result);
urb->status = -EINVAL;
goto out;
}
urb->actual_length += seg->result;
- if (seg->result < xfer->seg_size
+ if (!(usb_pipeisoc(xfer->urb->pipe))
+ && seg->result < xfer->seg_size
&& cnt != xfer->segs-1)
found_short = 1;
- dev_dbg(dev, "xfer %p#%u: DONE short %d "
+ dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
"result %zu urb->actual_length %d\n",
- xfer, seg->index, found_short, seg->result,
- urb->actual_length);
+ xfer, wa_xfer_id(xfer), seg->index, found_short,
+ seg->result, urb->actual_length);
break;
case WA_SEG_ERROR:
xfer->result = seg->result;
- dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
- xfer, seg->index, seg->result);
+ dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
+ xfer, wa_xfer_id(xfer), seg->index, seg->result,
+ seg->result);
goto out;
case WA_SEG_ABORTED:
- dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
- xfer, seg->index, urb->status);
- xfer->result = urb->status;
+ xfer->result = seg->result;
+ dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
+ xfer, wa_xfer_id(xfer), seg->index, seg->result,
+ seg->result);
goto out;
default:
- dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
- xfer, cnt, seg->status);
+ dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
+ xfer, wa_xfer_id(xfer), cnt, seg->status);
xfer->result = -EINVAL;
goto out;
}
@@ -285,26 +392,21 @@ out:
}
/*
- * Initialize a transfer's ID
+ * Mark the given segment as done. Return true if this completes the xfer.
+ * This should only be called for segs that have been submitted to an RPIPE.
+ * Delayed segs are not marked as submitted so they do not need to be marked
+ * as done when cleaning up.
*
- * We need to use a sequential number; if we use the pointer or the
- * hash of the pointer, it can repeat over sequential transfers and
- * then it will confuse the HWA....wonder why in hell they put a 32
- * bit handle in there then.
+ * xfer->lock has to be locked
*/
-static void wa_xfer_id_init(struct wa_xfer *xfer)
+static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
+ struct wa_seg *seg, enum wa_seg_status status)
{
- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
-}
+ seg->status = status;
+ xfer->segs_done++;
-/*
- * Return the xfer's ID associated with xfer
- *
- * Need to generate a
- */
-static u32 wa_xfer_id(struct wa_xfer *xfer)
-{
- return xfer->id;
+ /* check for done. */
+ return __wa_xfer_is_done(xfer);
}
/*
@@ -334,12 +436,51 @@ out:
struct wa_xfer_abort_buffer {
struct urb urb;
+ struct wahc *wa;
struct wa_xfer_abort cmd;
};
static void __wa_xfer_abort_cb(struct urb *urb)
{
struct wa_xfer_abort_buffer *b = urb->context;
+ struct wahc *wa = b->wa;
+
+ /*
+ * If the abort request URB failed, then the HWA did not get the abort
+ * command. Forcibly clean up the xfer without waiting for a Transfer
+ * Result from the HWA.
+ */
+ if (urb->status < 0) {
+ struct wa_xfer *xfer;
+ struct device *dev = &wa->usb_iface->dev;
+
+ xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
+ dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
+ __func__, urb->status);
+ if (xfer) {
+ unsigned long flags;
+ int done;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
+ __func__, xfer, wa_xfer_id(xfer));
+ spin_lock_irqsave(&xfer->lock, flags);
+ /* mark all segs as aborted. */
+ wa_complete_remaining_xfer_segs(xfer, 0,
+ WA_SEG_ABORTED);
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
+ } else {
+ dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
+ __func__, le32_to_cpu(b->cmd.dwTransferID));
+ }
+ }
+
+ wa_put(wa); /* taken in __wa_xfer_abort */
usb_put_urb(&b->urb);
}
@@ -351,15 +492,11 @@ static void __wa_xfer_abort_cb(struct urb *urb)
*
* The callback (see above) does nothing but freeing up the data by
* putting the URB. Because the URB is allocated at the head of the
- * struct, the whole space we allocated is kfreed.
- *
- * We'll get an 'aborted transaction' xfer result on DTI, that'll
- * politely ignore because at this point the transaction has been
- * marked as aborted already.
+ * struct, the whole space we allocated is kfreed.
*/
-static void __wa_xfer_abort(struct wa_xfer *xfer)
+static int __wa_xfer_abort(struct wa_xfer *xfer)
{
- int result;
+ int result = -ENOMEM;
struct device *dev = &xfer->wa->usb_iface->dev;
struct wa_xfer_abort_buffer *b;
struct wa_rpipe *rpipe = xfer->ep->hcpriv;
@@ -370,7 +507,8 @@ static void __wa_xfer_abort(struct wa_xfer *xfer)
b->cmd.bLength = sizeof(b->cmd);
b->cmd.bRequestType = WA_XFER_ABORT;
b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
- b->cmd.dwTransferID = wa_xfer_id(xfer);
+ b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
+ b->wa = wa_get(xfer->wa);
usb_init_urb(&b->urb);
usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
@@ -380,20 +518,63 @@ static void __wa_xfer_abort(struct wa_xfer *xfer)
result = usb_submit_urb(&b->urb, GFP_ATOMIC);
if (result < 0)
goto error_submit;
- return; /* callback frees! */
+ return result; /* callback frees! */
error_submit:
+ wa_put(xfer->wa);
if (printk_ratelimit())
dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
xfer, result);
kfree(b);
error_kmalloc:
- return;
+ return result;
}
/*
+ * Calculate the number of isoc frames starting from isoc_frame_offset
+ * that will fit in a transfer segment.
+ */
+static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
+ int isoc_frame_offset, int *total_size)
+{
+ int segment_size = 0, frame_count = 0;
+ int index = isoc_frame_offset;
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+
+ while ((index < xfer->urb->number_of_packets)
+ && ((segment_size + iso_frame_desc[index].length)
+ <= xfer->seg_size)) {
+ /*
+ * For Alereon HWA devices, only include an isoc frame in an
+ * out segment if it is physically contiguous with the previous
+ * frame. This is required because those devices expect
+ * the isoc frames to be sent as a single USB transaction as
+ * opposed to one transaction per frame with standard HWA.
+ */
+ if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ && (xfer->is_inbound == 0)
+ && (index > isoc_frame_offset)
+ && ((iso_frame_desc[index - 1].offset +
+ iso_frame_desc[index - 1].length) !=
+ iso_frame_desc[index].offset))
+ break;
+
+ /* this frame fits. count it. */
+ ++frame_count;
+ segment_size += iso_frame_desc[index].length;
+
+ /* move to the next isoc frame. */
+ ++index;
+ }
+
+ *total_size = segment_size;
+ return frame_count;
+}
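The loop above packs frames greedily until the next frame would exceed the
segment byte budget, and for Alereon hardware also stops at the first
non-contiguous frame. A self-contained sketch of the same rule over plain
arrays (the frame layout values are hypothetical):

    #include <stdio.h>

    struct frame { unsigned offset, length; };

    /* Count frames from 'start' that fit in 'seg_budget' bytes; if
     * 'require_contiguous' is set, also stop at the first buffer gap
     * (the Alereon concat-isoc quirk). */
    static int count_fitting_frames(const struct frame *f, int nframes,
                                    int start, unsigned seg_budget,
                                    int require_contiguous, unsigned *total)
    {
            unsigned size = 0;
            int i = start;

            while (i < nframes && size + f[i].length <= seg_budget) {
                    if (require_contiguous && i > start &&
                        f[i - 1].offset + f[i - 1].length != f[i].offset)
                            break;  /* gap in the buffer: end this segment */
                    size += f[i].length;
                    i++;
            }
            *total = size;
            return i - start;
    }

    int main(void)
    {
            /* hypothetical layout: the third frame is not contiguous */
            struct frame f[] = { {0, 512}, {512, 512}, {2048, 512} };
            unsigned total;
            int n = count_fitting_frames(f, 3, 0, 4096, 1, &total);

            printf("packed %d frames, %u bytes\n", n, total); /* 2, 1024 */
            return 0;
    }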
+
+/*
*
* @returns < 0 on error, transfer segment request size if ok
*/
@@ -417,44 +598,85 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
result = sizeof(struct wa_xfer_bi);
break;
case USB_ENDPOINT_XFER_ISOC:
- dev_err(dev, "FIXME: ISOC not implemented\n");
- result = -ENOSYS;
- goto error;
+ *pxfer_type = WA_XFER_TYPE_ISO;
+ result = sizeof(struct wa_xfer_hwaiso);
+ break;
default:
/* never happens */
BUG();
result = -EINVAL; /* shut gcc up */
- };
+ }
xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
+
+ maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
/* Compute the segment size and make sure it is a multiple of
* the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
* a check (FIXME) */
- maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
if (xfer->seg_size < maxpktsize) {
- dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
- "%zu\n", xfer->seg_size, maxpktsize);
+ dev_err(dev,
+ "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
+ xfer->seg_size, maxpktsize);
result = -EINVAL;
goto error;
}
xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
- xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
- / xfer->seg_size;
- if (xfer->segs >= WA_SEGS_MAX) {
- dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
- (int)(urb->transfer_buffer_length / xfer->seg_size),
+ if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
+ int index = 0;
+
+ xfer->segs = 0;
+ /*
+ * loop over urb->number_of_packets to determine how many
+ * xfer segments will be needed to send the isoc frames.
+ */
+ while (index < urb->number_of_packets) {
+ int seg_size; /* don't care. */
+ index += __wa_seg_calculate_isoc_frame_count(xfer,
+ index, &seg_size);
+ ++xfer->segs;
+ }
+ } else {
+ xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
+ xfer->seg_size);
+ if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
+ xfer->segs = 1;
+ }
+
+ if (xfer->segs > WA_SEGS_MAX) {
+ dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
+ (urb->transfer_buffer_length / xfer->seg_size),
WA_SEGS_MAX);
result = -EINVAL;
goto error;
}
- if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
- xfer->segs = 1;
error:
return result;
}
+static void __wa_setup_isoc_packet_descr(
+ struct wa_xfer_packet_info_hwaiso *packet_desc,
+ struct wa_xfer *xfer,
+ struct wa_seg *seg) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ int frame_index;
+
+ /* populate isoc packet descriptor. */
+ packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
+ packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
+ (sizeof(packet_desc->PacketLength[0]) *
+ seg->isoc_frame_count));
+ for (frame_index = 0; frame_index < seg->isoc_frame_count;
+ ++frame_index) {
+ int offset_index = frame_index + seg->isoc_frame_offset;
+ packet_desc->PacketLength[frame_index] =
+ cpu_to_le16(iso_frame_desc[offset_index].length);
+ }
+}
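The wLength written above is the fixed descriptor header plus one 16-bit
PacketLength entry per frame in the segment. A small sketch of the
arithmetic (the 4-byte header size is an assumed stand-in for
sizeof(struct wa_xfer_packet_info_hwaiso)):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const size_t hdr_size = 4; /* assumed fixed header size */
            int isoc_frame_count = 8;
            size_t wlength = hdr_size +
                             isoc_frame_count * sizeof(uint16_t);

            printf("wLength = %zu bytes for %d frames\n", wlength,
                   isoc_frame_count); /* 20 bytes for 8 frames */
            return 0;
    }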
+
+
/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
struct wa_xfer_hdr *xfer_hdr0,
@@ -462,12 +684,13 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
size_t xfer_hdr_size)
{
struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+ struct wa_seg *seg = xfer->seg[0];
- xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
+ xfer_hdr0 = &seg->xfer_hdr;
xfer_hdr0->bLength = xfer_hdr_size;
xfer_hdr0->bRequestType = xfer_type;
xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
- xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
+ xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
xfer_hdr0->bTransferSegment = 0;
switch (xfer_type) {
case WA_XFER_TYPE_CTL: {
@@ -480,8 +703,18 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
}
case WA_XFER_TYPE_BI:
break;
- case WA_XFER_TYPE_ISO:
- printk(KERN_ERR "FIXME: ISOC not implemented\n");
+ case WA_XFER_TYPE_ISO: {
+ struct wa_xfer_hwaiso *xfer_iso =
+ container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
+ struct wa_xfer_packet_info_hwaiso *packet_desc =
+ ((void *)xfer_iso) + xfer_hdr_size;
+
+ /* populate the isoc section of the transfer request. */
+ xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
+ /* populate isoc packet descriptor. */
+ __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
+ break;
+ }
default:
BUG();
};
@@ -490,12 +723,12 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
/*
* Callback for the OUT data phase of the segment request
*
- * Check wa_seg_cb(); most comments also apply here because this
+ * Check wa_seg_tr_cb(); most comments also apply here because this
* function does almost the same thing and they work closely
* together.
*
- * If the seg request has failed but this DTO phase has suceeded,
- * wa_seg_cb() has already failed the segment and moved the
+ * If the seg request has failed but this DTO phase has succeeded,
+ * wa_seg_tr_cb() has already failed the segment and moved the
* status to WA_SEG_ERROR, so this will go through 'case 0' and
* effectively do nothing.
*/
@@ -508,6 +741,143 @@ static void wa_seg_dto_cb(struct urb *urb)
struct wa_rpipe *rpipe;
unsigned long flags;
unsigned rpipe_ready = 0;
+ int data_send_done = 1, release_dto = 0, holding_dto = 0;
+ u8 done = 0;
+ int result;
+
+ /* free the sg if it was used. */
+ kfree(urb->sg);
+ urb->sg = NULL;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ /* Alereon HWA sends all isoc frames in a single transfer. */
+ if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ seg->isoc_frame_index += seg->isoc_frame_count;
+ else
+ seg->isoc_frame_index += 1;
+ if (seg->isoc_frame_index < seg->isoc_frame_count) {
+ data_send_done = 0;
+ holding_dto = 1; /* checked in error cases. */
+ /*
+ * if this is the last isoc frame of the segment, we
+ * can release DTO after sending this frame.
+ */
+ if ((seg->isoc_frame_index + 1) >=
+ seg->isoc_frame_count)
+ release_dto = 1;
+ }
+ dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
+ wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
+ holding_dto, release_dto);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&xfer->lock, flags);
+ seg->result += urb->actual_length;
+ if (data_send_done) {
+ dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
+ wa_xfer_id(xfer), seg->index, seg->result);
+ if (seg->status < WA_SEG_PENDING)
+ seg->status = WA_SEG_PENDING;
+ } else {
+ /* should only hit this for isoc xfers. */
+ /*
+ * Populate the dto URB with the next isoc frame buffer,
+ * send the URB and release DTO if we no longer need it.
+ */
+ __wa_populate_dto_urb_isoc(xfer, seg,
+ seg->isoc_frame_offset + seg->isoc_frame_index);
+
+ /* resubmit the URB with the next isoc frame. */
+ /* take a ref on resubmit. */
+ wa_xfer_get(xfer);
+ result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
+ wa_xfer_id(xfer), seg->index, result);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ goto error_dto_submit;
+ }
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (release_dto) {
+ __wa_dto_put(wa);
+ wa_check_for_delayed_rpipes(wa);
+ }
+ break;
+ case -ECONNRESET: /* URB unlinked; no need to do anything */
+ case -ENOENT: /* as it was done by the who unlinked us */
+ if (holding_dto) {
+ __wa_dto_put(wa);
+ wa_check_for_delayed_rpipes(wa);
+ }
+ break;
+ default: /* Other errors ... */
+ dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
+ wa_xfer_id(xfer), seg->index, urb->status);
+ goto error_default;
+ }
+
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
+ return;
+
+error_dto_submit:
+ /* taken on resubmit attempt. */
+ wa_xfer_put(xfer);
+error_default:
+ spin_lock_irqsave(&xfer->lock, flags);
+ rpipe = xfer->ep->hcpriv;
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ if (seg->status != WA_SEG_ERROR) {
+ seg->result = urb->status;
+ __wa_xfer_abort(xfer);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (holding_dto) {
+ __wa_dto_put(wa);
+ wa_check_for_delayed_rpipes(wa);
+ }
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
+}
+
+/*
+ * Callback for the isoc packet descriptor phase of the segment request
+ *
+ * Check wa_seg_tr_cb(); most comments also apply here because this
+ * function does almost the same thing and they work closely
+ * together.
+ *
+ * If the seg request has failed but this phase has succeeded,
+ * wa_seg_tr_cb() has already failed the segment and moved the
+ * status to WA_SEG_ERROR, so this will go through 'case 0' and
+ * effectively do nothing.
+ */
+static void wa_seg_iso_pack_desc_cb(struct urb *urb)
+{
+ struct wa_seg *seg = urb->context;
+ struct wa_xfer *xfer = seg->xfer;
+ struct wahc *wa;
+ struct device *dev;
+ struct wa_rpipe *rpipe;
+ unsigned long flags;
+ unsigned rpipe_ready = 0;
u8 done = 0;
switch (urb->status) {
@@ -515,11 +885,10 @@ static void wa_seg_dto_cb(struct urb *urb)
spin_lock_irqsave(&xfer->lock, flags);
wa = xfer->wa;
dev = &wa->usb_iface->dev;
- dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
- xfer, seg->index, urb->actual_length);
- if (seg->status < WA_SEG_PENDING)
+ dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
+ wa_xfer_id(xfer), seg->index);
+ if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
seg->status = WA_SEG_PENDING;
- seg->result = urb->actual_length;
spin_unlock_irqrestore(&xfer->lock, flags);
break;
case -ECONNRESET: /* URB unlinked; no need to do anything */
@@ -530,21 +899,20 @@ static void wa_seg_dto_cb(struct urb *urb)
wa = xfer->wa;
dev = &wa->usb_iface->dev;
rpipe = xfer->ep->hcpriv;
- dev_dbg(dev, "xfer %p#%u: data out error %d\n",
- xfer, seg->index, urb->status);
+ pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
+ wa_xfer_id(xfer), seg->index, urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
- dev_err(dev, "DTO: URB max acceptable errors "
- "exceeded, resetting device\n");
+ dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
wa_reset_all(wa);
}
if (seg->status != WA_SEG_ERROR) {
- seg->status = WA_SEG_ERROR;
+ usb_unlink_urb(seg->dto_urb);
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -552,6 +920,8 @@ static void wa_seg_dto_cb(struct urb *urb)
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -568,11 +938,11 @@ static void wa_seg_dto_cb(struct urb *urb)
* We have to check before setting the status to WA_SEG_PENDING
* because sometimes the xfer result callback arrives before this
* callback (geeeeeeze), so it might happen that we are already in
- * another state. As well, we don't set it if the transfer is inbound,
+ * another state. As well, we don't set it if the transfer is not inbound,
* as in that case, wa_seg_dto_cb will do it when the OUT data phase
* finishes.
*/
-static void wa_seg_cb(struct urb *urb)
+static void wa_seg_tr_cb(struct urb *urb)
{
struct wa_seg *seg = urb->context;
struct wa_xfer *xfer = seg->xfer;
@@ -588,8 +958,11 @@ static void wa_seg_cb(struct urb *urb)
spin_lock_irqsave(&xfer->lock, flags);
wa = xfer->wa;
dev = &wa->usb_iface->dev;
- dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
- if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
+ xfer, wa_xfer_id(xfer), seg->index);
+ if (xfer->is_inbound &&
+ seg->status < WA_SEG_PENDING &&
+ !(usb_pipeisoc(xfer->urb->pipe)))
seg->status = WA_SEG_PENDING;
spin_unlock_irqrestore(&xfer->lock, flags);
break;
@@ -602,27 +975,180 @@ static void wa_seg_cb(struct urb *urb)
dev = &wa->usb_iface->dev;
rpipe = xfer->ep->hcpriv;
if (printk_ratelimit())
- dev_err(dev, "xfer %p#%u: request error %d\n",
- xfer, seg->index, urb->status);
+ dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
dev_err(dev, "DTO: URB max acceptable errors "
"exceeded, resetting device\n");
wa_reset_all(wa);
}
+ usb_unlink_urb(seg->isoc_pack_desc_urb);
usb_unlink_urb(seg->dto_urb);
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
+}
+
+/*
+ * Allocate an SG list to store bytes_to_transfer bytes and copy the
+ * subset of the in_sg that matches the buffer subset
+ * we are about to transfer.
+ */
+static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
+ const unsigned int bytes_transferred,
+ const unsigned int bytes_to_transfer, int *out_num_sgs)
+{
+ struct scatterlist *out_sg;
+ unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
+ nents;
+ struct scatterlist *current_xfer_sg = in_sg;
+ struct scatterlist *current_seg_sg, *last_seg_sg;
+
+ /* skip previously transferred pages. */
+ while ((current_xfer_sg) &&
+ (bytes_processed < bytes_transferred)) {
+ bytes_processed += current_xfer_sg->length;
+
+ /* advance the sg if the current segment starts on or past
+  * the next page. */
+ if (bytes_processed <= bytes_transferred)
+ current_xfer_sg = sg_next(current_xfer_sg);
+ }
+
+ /* the data for the current segment starts in current_xfer_sg;
+  * calculate the offset. */
+ if (bytes_processed > bytes_transferred) {
+ offset_into_current_page_data = current_xfer_sg->length -
+ (bytes_processed - bytes_transferred);
+ }
+
+ /* calculate the number of pages needed by this segment. */
+ nents = DIV_ROUND_UP((bytes_to_transfer +
+ offset_into_current_page_data +
+ current_xfer_sg->offset),
+ PAGE_SIZE);
+
+ out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
+ if (out_sg) {
+ sg_init_table(out_sg, nents);
+
+ /* copy the portion of the incoming SG that correlates to the
+ * data to be transferred by this segment to the segment SG. */
+ last_seg_sg = current_seg_sg = out_sg;
+ bytes_processed = 0;
+
+ /* reset nents and calculate the actual number of sg entries
+  * needed. */
+ nents = 0;
+ while ((bytes_processed < bytes_to_transfer) &&
+ current_seg_sg && current_xfer_sg) {
+ unsigned int page_len = min((current_xfer_sg->length -
+ offset_into_current_page_data),
+ (bytes_to_transfer - bytes_processed));
+
+ sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
+ page_len,
+ current_xfer_sg->offset +
+ offset_into_current_page_data);
+
+ bytes_processed += page_len;
+
+ last_seg_sg = current_seg_sg;
+ current_seg_sg = sg_next(current_seg_sg);
+ current_xfer_sg = sg_next(current_xfer_sg);
+
+ /* only the first page may require additional offset. */
+ offset_into_current_page_data = 0;
+ nents++;
+ }
+
+ /* update num_sgs and terminate the list since we may have
+ * concatenated pages. */
+ sg_mark_end(last_seg_sg);
+ *out_num_sgs = nents;
+ }
+
+ return out_sg;
+}
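The function above makes two passes: skip whole entries that were already
transferred, then emit entries covering only the next chunk, applying an
offset to the first useful entry. A user-space analogue over simple
(base, length) spans, with page and DMA handling elided (a sketch; the
types are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct span { const char *base; size_t len; };

    /* Emit the sub-spans of 'in' covering [done, done + want). */
    static int subset_spans(const struct span *in, int n_in, size_t done,
                            size_t want, struct span *out, int max_out)
    {
            size_t seen = 0, off = 0;
            int i = 0, n_out = 0;

            /* skip fully-transferred spans. */
            while (i < n_in && seen + in[i].len <= done)
                    seen += in[i++].len;
            if (i < n_in)
                    off = done - seen; /* offset into the first span */

            while (want > 0 && i < n_in && n_out < max_out) {
                    size_t take = in[i].len - off;

                    if (take > want)
                            take = want;
                    out[n_out].base = in[i].base + off;
                    out[n_out].len = take;
                    n_out++;
                    want -= take;
                    off = 0; /* only the first span needs an offset */
                    i++;
            }
            return n_out;
    }

    int main(void)
    {
            char a[100], b[100];
            struct span in[] = { {a, 100}, {b, 100} }, out[4];
            int n = subset_spans(in, 2, 80, 60, out, 4);

            printf("%d sub-spans (expect 2: 20 of a, 40 of b)\n", n);
            return 0;
    }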
+
+/*
+ * Populate DMA buffer info for the isoc dto urb.
+ */
+static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
+ struct wa_seg *seg, int curr_iso_frame)
+{
+ seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ seg->dto_urb->sg = NULL;
+ seg->dto_urb->num_sgs = 0;
+ /* dto urb buffer address pulled from iso_frame_desc. */
+ seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
+ xfer->urb->iso_frame_desc[curr_iso_frame].offset;
+ /* The Alereon HWA sends a single URB with all isoc segs. */
+ if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ seg->dto_urb->transfer_buffer_length = seg->isoc_size;
+ else
+ seg->dto_urb->transfer_buffer_length =
+ xfer->urb->iso_frame_desc[curr_iso_frame].length;
+}
+
+/*
+ * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
+ */
+static int __wa_populate_dto_urb(struct wa_xfer *xfer,
+ struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
+{
+ int result = 0;
+
+ if (xfer->is_dma) {
+ seg->dto_urb->transfer_dma =
+ xfer->urb->transfer_dma + buf_itr_offset;
+ seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ seg->dto_urb->sg = NULL;
+ seg->dto_urb->num_sgs = 0;
+ } else {
+ /* do buffer or SG processing. */
+ seg->dto_urb->transfer_flags &=
+ ~URB_NO_TRANSFER_DMA_MAP;
+ /* this should always be 0 before a resubmit. */
+ seg->dto_urb->num_mapped_sgs = 0;
+
+ if (xfer->urb->transfer_buffer) {
+ seg->dto_urb->transfer_buffer =
+ xfer->urb->transfer_buffer +
+ buf_itr_offset;
+ seg->dto_urb->sg = NULL;
+ seg->dto_urb->num_sgs = 0;
+ } else {
+ seg->dto_urb->transfer_buffer = NULL;
+
+ /*
+ * allocate an SG list to store seg_size bytes
+ * and copy the subset of the xfer->urb->sg that
+ * matches the buffer subset we are about to
+ * read.
+ */
+ seg->dto_urb->sg = wa_xfer_create_subset_sg(
+ xfer->urb->sg,
+ buf_itr_offset, buf_itr_size,
+ &(seg->dto_urb->num_sgs));
+ if (!(seg->dto_urb->sg))
+ result = -ENOMEM;
+ }
+ }
+ seg->dto_urb->transfer_buffer_length = buf_itr_size;
+
+ return result;
}
/*
@@ -635,7 +1161,7 @@ static void wa_seg_cb(struct urb *urb)
*/
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
- int result, cnt;
+ int result, cnt, isoc_frame_offset = 0;
size_t alloc_size = sizeof(*xfer->seg[0])
- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
struct usb_device *usb_dev = xfer->wa->usb_dev;
@@ -650,20 +1176,65 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
buf_itr = 0;
buf_size = xfer->urb->transfer_buffer_length;
for (cnt = 0; cnt < xfer->segs; cnt++) {
- seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
+ size_t iso_pkt_descr_size = 0;
+ int seg_isoc_frame_count = 0, seg_isoc_size = 0;
+
+ /*
+ * Adjust the size of the segment object to contain space for
+ * the isoc packet descriptor buffer.
+ */
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ seg_isoc_frame_count =
+ __wa_seg_calculate_isoc_frame_count(xfer,
+ isoc_frame_offset, &seg_isoc_size);
+
+ iso_pkt_descr_size =
+ sizeof(struct wa_xfer_packet_info_hwaiso) +
+ (seg_isoc_frame_count * sizeof(__le16));
+ }
+ seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
+ GFP_ATOMIC);
if (seg == NULL)
- goto error_seg_kzalloc;
+ goto error_seg_kmalloc;
wa_seg_init(seg);
seg->xfer = xfer;
seg->index = cnt;
- usb_fill_bulk_urb(&seg->urb, usb_dev,
+ usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
usb_sndbulkpipe(usb_dev,
dto_epd->bEndpointAddress),
&seg->xfer_hdr, xfer_hdr_size,
- wa_seg_cb, seg);
- buf_itr_size = buf_size > xfer->seg_size ?
- xfer->seg_size : buf_size;
+ wa_seg_tr_cb, seg);
+ buf_itr_size = min(buf_size, xfer->seg_size);
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ seg->isoc_frame_count = seg_isoc_frame_count;
+ seg->isoc_frame_offset = isoc_frame_offset;
+ seg->isoc_size = seg_isoc_size;
+ /* iso packet descriptor. */
+ seg->isoc_pack_desc_urb =
+ usb_alloc_urb(0, GFP_ATOMIC);
+ if (seg->isoc_pack_desc_urb == NULL)
+ goto error_iso_pack_desc_alloc;
+ /*
+ * The buffer for the isoc packet descriptor starts
+ * after the transfer request header in the
+ * segment object memory buffer.
+ */
+ usb_fill_bulk_urb(
+ seg->isoc_pack_desc_urb, usb_dev,
+ usb_sndbulkpipe(usb_dev,
+ dto_epd->bEndpointAddress),
+ (void *)(&seg->xfer_hdr) +
+ xfer_hdr_size,
+ iso_pkt_descr_size,
+ wa_seg_iso_pack_desc_cb, seg);
+
+ /* adjust starting frame offset for next seg. */
+ isoc_frame_offset += seg_isoc_frame_count;
+ }
+
if (xfer->is_inbound == 0 && buf_size > 0) {
+ /* outbound data. */
seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (seg->dto_urb == NULL)
goto error_dto_alloc;
@@ -672,32 +1243,44 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
usb_sndbulkpipe(usb_dev,
dto_epd->bEndpointAddress),
NULL, 0, wa_seg_dto_cb, seg);
- if (xfer->is_dma) {
- seg->dto_urb->transfer_dma =
- xfer->urb->transfer_dma + buf_itr;
- seg->dto_urb->transfer_flags |=
- URB_NO_TRANSFER_DMA_MAP;
- } else
- seg->dto_urb->transfer_buffer =
- xfer->urb->transfer_buffer + buf_itr;
- seg->dto_urb->transfer_buffer_length = buf_itr_size;
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ /*
+ * Fill in the xfer buffer information for the
+ * first isoc frame. Subsequent frames in this
+ * segment will be filled in and sent from the
+ * DTO completion routine, if needed.
+ */
+ __wa_populate_dto_urb_isoc(xfer, seg,
+ seg->isoc_frame_offset);
+ } else {
+ /* fill in the xfer buffer information. */
+ result = __wa_populate_dto_urb(xfer, seg,
+ buf_itr, buf_itr_size);
+ if (result < 0)
+ goto error_seg_outbound_populate;
+
+ buf_itr += buf_itr_size;
+ buf_size -= buf_itr_size;
+ }
}
seg->status = WA_SEG_READY;
- buf_itr += buf_itr_size;
- buf_size -= buf_itr_size;
}
return 0;
+ /*
+ * Free the memory for the current segment which failed to init.
+ * Use the fact that cnt is left at where it failed. The remaining
+ * segments will be cleaned up by wa_xfer_destroy.
+ */
+error_seg_outbound_populate:
+ usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
+ usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
+error_iso_pack_desc_alloc:
kfree(xfer->seg[cnt]);
- cnt--;
-error_seg_kzalloc:
- /* use the fact that cnt is left at were it failed */
- for (; cnt > 0; cnt--) {
- if (xfer->is_inbound == 0)
- kfree(xfer->seg[cnt]->dto_urb);
- kfree(xfer->seg[cnt]);
- }
+ xfer->seg[cnt] = NULL;
+error_seg_kmalloc:
error_segs_kzalloc:
return result;
}
@@ -735,21 +1318,50 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
wa_xfer_id_init(xfer);
__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
- /* Fill remainig headers */
+ /* Fill remaining headers */
xfer_hdr = xfer_hdr0;
- transfer_size = urb->transfer_buffer_length;
- xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
- xfer->seg_size : transfer_size;
- transfer_size -= xfer->seg_size;
- for (cnt = 1; cnt < xfer->segs; cnt++) {
- xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
- memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
- xfer_hdr->bTransferSegment = cnt;
- xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
- cpu_to_le32(xfer->seg_size)
- : cpu_to_le32(transfer_size);
- xfer->seg[cnt]->status = WA_SEG_READY;
+ if (xfer_type == WA_XFER_TYPE_ISO) {
+ xfer_hdr0->dwTransferLength =
+ cpu_to_le32(xfer->seg[0]->isoc_size);
+ for (cnt = 1; cnt < xfer->segs; cnt++) {
+ struct wa_xfer_packet_info_hwaiso *packet_desc;
+ struct wa_seg *seg = xfer->seg[cnt];
+ struct wa_xfer_hwaiso *xfer_iso;
+
+ xfer_hdr = &seg->xfer_hdr;
+ xfer_iso = container_of(xfer_hdr,
+ struct wa_xfer_hwaiso, hdr);
+ packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
+ /*
+ * Copy values from the 0th header. Segment specific
+ * values are set below.
+ */
+ memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
+ xfer_hdr->bTransferSegment = cnt;
+ xfer_hdr->dwTransferLength =
+ cpu_to_le32(seg->isoc_size);
+ xfer_iso->dwNumOfPackets =
+ cpu_to_le32(seg->isoc_frame_count);
+ __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
+ seg->status = WA_SEG_READY;
+ }
+ } else {
+ transfer_size = urb->transfer_buffer_length;
+ xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
+ cpu_to_le32(xfer->seg_size) :
+ cpu_to_le32(transfer_size);
transfer_size -= xfer->seg_size;
+ for (cnt = 1; cnt < xfer->segs; cnt++) {
+ xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
+ memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
+ xfer_hdr->bTransferSegment = cnt;
+ xfer_hdr->dwTransferLength =
+ transfer_size > xfer->seg_size ?
+ cpu_to_le32(xfer->seg_size)
+ : cpu_to_le32(transfer_size);
+ xfer->seg[cnt]->status = WA_SEG_READY;
+ transfer_size -= xfer->seg_size;
+ }
}
xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
result = 0;
@@ -764,70 +1376,161 @@ error_setup_sizes:
* rpipe->seg_lock is held!
*/
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
- struct wa_seg *seg)
+ struct wa_seg *seg, int *dto_done)
{
int result;
- result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
+
+ /* default to done unless we encounter a multi-frame isoc segment. */
+ *dto_done = 1;
+
+ /*
+ * Take a ref for each segment urb so the xfer cannot disappear until
+ * all of the callbacks run.
+ */
+ wa_xfer_get(xfer);
+ /* submit the transfer request. */
+ seg->status = WA_SEG_SUBMITTED;
+ result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
if (result < 0) {
- printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
- xfer, seg->index, result);
- goto error_seg_submit;
+ pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
+ __func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
+ goto error_tr_submit;
}
+ /* submit the isoc packet descriptor if present. */
+ if (seg->isoc_pack_desc_urb) {
+ wa_xfer_get(xfer);
+ result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
+ seg->isoc_frame_index = 0;
+ if (result < 0) {
+ pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
+ __func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
+ goto error_iso_pack_desc_submit;
+ }
+ }
+ /* submit the out data if this is an out request. */
if (seg->dto_urb) {
+ struct wahc *wa = xfer->wa;
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
if (result < 0) {
- printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
- xfer, seg->index, result);
+ pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
+ __func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
goto error_dto_submit;
}
+ /*
+ * If this segment contains more than one isoc frame, hold
+ * onto the dto resource until we send all frames.
+ * Only applies to non-Alereon devices.
+ */
+ if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
+ && (seg->isoc_frame_count > 1))
+ *dto_done = 0;
}
- seg->status = WA_SEG_SUBMITTED;
rpipe_avail_dec(rpipe);
return 0;
error_dto_submit:
- usb_unlink_urb(&seg->urb);
-error_seg_submit:
+ usb_unlink_urb(seg->isoc_pack_desc_urb);
+error_iso_pack_desc_submit:
+ usb_unlink_urb(&seg->tr_urb);
+error_tr_submit:
seg->status = WA_SEG_ERROR;
seg->result = result;
+ *dto_done = 1;
return result;
}
/*
- * Execute more queued request segments until the maximum concurrent allowed
+ * Execute more queued request segments until the maximum concurrent allowed.
+ * Return true if the DTO resource was acquired and released.
*
* The ugly unlock/lock sequence on the error path is needed as the
* xfer->lock normally nests the seg_lock and not viceversa.
- *
*/
-static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
+static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
{
- int result;
+ int result, dto_acquired = 0, dto_done = 0;
struct device *dev = &rpipe->wa->usb_iface->dev;
struct wa_seg *seg;
struct wa_xfer *xfer;
unsigned long flags;
+ *dto_waiting = 0;
+
spin_lock_irqsave(&rpipe->seg_lock, flags);
while (atomic_read(&rpipe->segs_available) > 0
- && !list_empty(&rpipe->seg_list)) {
- seg = list_entry(rpipe->seg_list.next, struct wa_seg,
+ && !list_empty(&rpipe->seg_list)
+ && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
+ seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
list_node);
list_del(&seg->list_node);
xfer = seg->xfer;
- result = __wa_seg_submit(rpipe, xfer, seg);
- dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
- xfer, seg->index, atomic_read(&rpipe->segs_available), result);
+ /*
+ * Get a reference to the xfer in case the callbacks for the
+ * URBs submitted by __wa_seg_submit attempt to complete
+ * the xfer before this function completes.
+ */
+ wa_xfer_get(xfer);
+ result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
+ /* release the dto resource if this RPIPE is done with it. */
+ if (dto_done)
+ __wa_dto_put(rpipe->wa);
+ dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ atomic_read(&rpipe->segs_available), result);
if (unlikely(result < 0)) {
+ int done;
+
spin_unlock_irqrestore(&rpipe->seg_lock, flags);
spin_lock_irqsave(&xfer->lock, flags);
__wa_xfer_abort(xfer);
+ /*
+ * This seg was marked as submitted when it was put on
+ * the RPIPE seg_list. Mark it done.
+ */
xfer->segs_done++;
+ done = __wa_xfer_is_done(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
spin_lock_irqsave(&rpipe->seg_lock, flags);
}
+ wa_xfer_put(xfer);
}
+ /*
+ * Mark this RPIPE as waiting if DTO was not acquired, there are
+ * delayed segs, and there are no active transfers to wake us up later.
+ */
+ if (!dto_acquired && !list_empty(&rpipe->seg_list)
+ && (atomic_read(&rpipe->segs_available) ==
+ le16_to_cpu(rpipe->descr.wRequests)))
+ *dto_waiting = 1;
+
spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+
+ return dto_done;
+}
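Taken together with wa_add_delayed_rpipe() and
wa_check_for_delayed_rpipes(), the flow is: try to take DTO for each
queued segment; if it cannot be acquired and work remains, park the RPIPE
on a delayed list that whoever releases DTO then drains. A condensed,
single-threaded sketch (locking elided; all names are illustrative):

    #include <stdio.h>

    static int dto_in_use;

    static int dto_try_get(void) { return dto_in_use ? 0 : (dto_in_use = 1); }
    static void dto_put(void)    { dto_in_use = 0; }

    struct rpipe_sketch {
            struct rpipe_sketch *next_waiter;
            int queued_segs;
            int id;
    };

    static struct rpipe_sketch *waiters;

    static void run_rpipe(struct rpipe_sketch *rp)
    {
            while (rp->queued_segs > 0 && dto_try_get()) {
                    printf("rpipe %d: submitted a segment\n", rp->id);
                    rp->queued_segs--;
                    dto_put(); /* single-frame seg: release right away */
            }
            if (rp->queued_segs > 0) { /* DTO busy: park and wait */
                    rp->next_waiter = waiters;
                    waiters = rp;
            }
    }

    /* Called after a DTO release, like wa_check_for_delayed_rpipes(). */
    static void service_waiters(void)
    {
            while (waiters) {
                    struct rpipe_sketch *rp = waiters;

                    waiters = rp->next_waiter;
                    run_rpipe(rp);
                    if (waiters == rp)
                            break; /* it re-parked: DTO is busy again */
            }
    }

    int main(void)
    {
            struct rpipe_sketch a = { NULL, 2, 1 };

            run_rpipe(&a);
            service_waiters(); /* nothing parked: segments already sent */
            return 0;
    }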
+
+static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
+{
+ int dto_waiting;
+ int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
+
+ /*
+ * If this RPIPE is waiting on the DTO resource, add it to the tail of
+ * the waiting list.
+ * Otherwise, if the WA DTO resource was acquired and released by
+ * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
+ * DTO and failed during that time. Check the delayed list and process
+ * any waiters.
+ */
+ if (dto_waiting)
+ wa_add_delayed_rpipe(rpipe->wa, rpipe);
+ else if (dto_done)
+ wa_check_for_delayed_rpipes(rpipe->wa);
}
/*
@@ -839,7 +1542,7 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
*/
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
- int result;
+ int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
struct wahc *wa = xfer->wa;
struct device *dev = &wa->usb_iface->dev;
unsigned cnt;
@@ -858,27 +1561,58 @@ static int __wa_xfer_submit(struct wa_xfer *xfer)
result = 0;
spin_lock_irqsave(&rpipe->seg_lock, flags);
for (cnt = 0; cnt < xfer->segs; cnt++) {
+ int delay_seg = 1;
+
available = atomic_read(&rpipe->segs_available);
empty = list_empty(&rpipe->seg_list);
seg = xfer->seg[cnt];
- dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
- xfer, cnt, available, empty,
- available == 0 || !empty ? "delayed" : "submitted");
- if (available == 0 || !empty) {
- dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
+ if (available && empty) {
+ /*
+ * Only attempt to acquire DTO if we have a segment
+ * to send.
+ */
+ dto_acquired = __wa_dto_try_get(rpipe->wa);
+ if (dto_acquired) {
+ delay_seg = 0;
+ result = __wa_seg_submit(rpipe, xfer, seg,
+ &dto_done);
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
+ xfer, wa_xfer_id(xfer), cnt, available,
+ empty);
+ if (dto_done)
+ __wa_dto_put(rpipe->wa);
+
+ if (result < 0) {
+ __wa_xfer_abort(xfer);
+ goto error_seg_submit;
+ }
+ }
+ }
+
+ if (delay_seg) {
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
+ xfer, wa_xfer_id(xfer), cnt, available, empty);
seg->status = WA_SEG_DELAYED;
list_add_tail(&seg->list_node, &rpipe->seg_list);
- } else {
- result = __wa_seg_submit(rpipe, xfer, seg);
- if (result < 0) {
- __wa_xfer_abort(xfer);
- goto error_seg_submit;
- }
}
xfer->segs_submitted++;
}
error_seg_submit:
+ /*
+ * Mark this RPIPE as waiting if DTO was not acquired, there are
+ * delayed segs, and there are no active transfers to wake us up later.
+ */
+ if (!dto_acquired && !list_empty(&rpipe->seg_list)
+ && (atomic_read(&rpipe->segs_available) ==
+ le16_to_cpu(rpipe->descr.wRequests)))
+ dto_waiting = 1;
spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+
+ if (dto_waiting)
+ wa_add_delayed_rpipe(rpipe->wa, rpipe);
+ else if (dto_done)
+ wa_check_for_delayed_rpipes(rpipe->wa);
+
return result;
}
@@ -904,7 +1638,7 @@ error_seg_submit:
* result never kicks in, the xfer will timeout from the USB code and
* dequeue() will be called.
*/
-static void wa_urb_enqueue_b(struct wa_xfer *xfer)
+static int wa_urb_enqueue_b(struct wa_xfer *xfer)
{
int result;
unsigned long flags;
@@ -915,18 +1649,23 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)
unsigned done;
result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
- if (result < 0)
+ if (result < 0) {
+ pr_err("%s: error_rpipe_get\n", __func__);
goto error_rpipe_get;
+ }
result = -ENODEV;
/* FIXME: segmentation broken -- kills DWA */
mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
if (urb->dev == NULL) {
mutex_unlock(&wusbhc->mutex);
+ pr_err("%s: error usb dev gone\n", __func__);
goto error_dev_gone;
}
wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
if (wusb_dev == NULL) {
mutex_unlock(&wusbhc->mutex);
+ dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
+ __func__);
goto error_dev_gone;
}
mutex_unlock(&wusbhc->mutex);
@@ -934,21 +1673,35 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)
spin_lock_irqsave(&xfer->lock, flags);
xfer->wusb_dev = wusb_dev;
result = urb->status;
- if (urb->status != -EINPROGRESS)
+ if (urb->status != -EINPROGRESS) {
+ dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
goto error_dequeued;
+ }
result = __wa_xfer_setup(xfer, urb);
- if (result < 0)
+ if (result < 0) {
+ dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
goto error_xfer_setup;
+ }
+ /*
+ * Get a xfer reference since __wa_xfer_submit starts asynchronous
+ * operations that may try to complete the xfer before this function
+ * exits.
+ */
+ wa_xfer_get(xfer);
result = __wa_xfer_submit(xfer);
- if (result < 0)
+ if (result < 0) {
+ dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
goto error_xfer_submit;
+ }
spin_unlock_irqrestore(&xfer->lock, flags);
- return;
+ wa_xfer_put(xfer);
+ return 0;
- /* this is basically wa_xfer_completion() broken up wa_xfer_giveback()
- * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
- * upundo setup().
+ /*
+ * this is basically wa_xfer_completion() broken up: wa_xfer_giveback()
+ * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
+ * setup().
*/
error_xfer_setup:
error_dequeued:
@@ -960,8 +1713,7 @@ error_dev_gone:
rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
xfer->result = result;
- wa_xfer_giveback(xfer);
- return;
+ return result;
error_xfer_submit:
done = __wa_xfer_is_done(xfer);
@@ -969,6 +1721,9 @@ error_xfer_submit:
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
+ wa_xfer_put(xfer);
+ /* return success since the completion routine will run. */
+ return 0;
}
/*
@@ -976,34 +1731,83 @@ error_xfer_submit:
*
* We need to be careful here, as dequeue() could be called in the
* middle. That's why we do the whole thing under the
- * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
+ * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
* and then checks the list -- so as we would be acquiring in inverse
- * order, we just drop the lock once we have the xfer and reacquire it
- * later.
+ * order, we move the delayed list to a separate list while locked and then
+ * submit them without the list lock held.
*/
void wa_urb_enqueue_run(struct work_struct *ws)
{
- struct wahc *wa = container_of(ws, struct wahc, xfer_work);
+ struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
struct wa_xfer *xfer, *next;
struct urb *urb;
+ LIST_HEAD(tmp_list);
+ /* Create a copy of the wa->xfer_delayed_list while holding the lock */
spin_lock_irq(&wa->xfer_list_lock);
- list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
- list_node) {
+ list_cut_position(&tmp_list, &wa->xfer_delayed_list,
+ wa->xfer_delayed_list.prev);
+ spin_unlock_irq(&wa->xfer_list_lock);
+
+ /*
+ * enqueue from temp list without list lock held since wa_urb_enqueue_b
+ * can take xfer->lock as well as lock mutexes.
+ */
+ list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
list_del_init(&xfer->list_node);
- spin_unlock_irq(&wa->xfer_list_lock);
urb = xfer->urb;
- wa_urb_enqueue_b(xfer);
+ if (wa_urb_enqueue_b(xfer) < 0)
+ wa_xfer_giveback(xfer);
usb_put_urb(urb); /* taken when queuing */
-
- spin_lock_irq(&wa->xfer_list_lock);
}
- spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
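Both this work item and wa_process_errored_transfers_run() below rely on
the same splice-then-process idiom: steal the whole shared list under the
lock (list_cut_position() in the kernel), then walk the private copy with
the lock dropped so the per-item work may sleep. A minimal user-space
analogue with a singly-linked list (a sketch; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct node { struct node *next; int payload; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *shared_head;

    static void process_all(void)
    {
            struct node *batch;

            pthread_mutex_lock(&list_lock);
            batch = shared_head;  /* cut the entire list... */
            shared_head = NULL;   /* ...leaving the shared list empty */
            pthread_mutex_unlock(&list_lock);

            /* per-item work may sleep or take other locks here, since
             * list_lock is no longer held. */
            for (; batch; batch = batch->next)
                    printf("processing %d\n", batch->payload);
    }

    int main(void)
    {
            struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };

            shared_head = &a;
            process_all();
            return 0;
    }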
/*
+ * Process the errored transfers on the Wire Adapter outside of interrupt.
+ */
+void wa_process_errored_transfers_run(struct work_struct *ws)
+{
+ struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
+ struct wa_xfer *xfer, *next;
+ LIST_HEAD(tmp_list);
+
+ pr_info("%s: Run delayed STALL processing.\n", __func__);
+
+ /* Create a copy of the wa->xfer_errored_list while holding the lock */
+ spin_lock_irq(&wa->xfer_list_lock);
+ list_cut_position(&tmp_list, &wa->xfer_errored_list,
+ wa->xfer_errored_list.prev);
+ spin_unlock_irq(&wa->xfer_list_lock);
+
+ /*
+ * run rpipe_clear_feature_stalled from temp list without list lock
+ * held.
+ */
+ list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+ struct wa_rpipe *rpipe;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ ep = xfer->ep;
+ rpipe = ep->hcpriv;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ /* clear RPIPE feature stalled without holding a lock. */
+ rpipe_clear_feature_stalled(wa, ep);
+
+ /* complete the xfer. This removes it from the tmp list. */
+ wa_xfer_completion(xfer);
+
+ /* check for work. */
+ wa_xfer_delayed_run(rpipe);
+ }
+}
+EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
+
+/*
* Submit a transfer to the Wire Adapter in a delayed way
*
* The process of enqueuing involves possible sleeps() [see
@@ -1024,13 +1828,20 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
unsigned long my_flags;
unsigned cant_sleep = irqs_disabled() | in_atomic();
- if (urb->transfer_buffer == NULL
+ if ((urb->transfer_buffer == NULL)
+ && (urb->sg == NULL)
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
&& urb->transfer_buffer_length != 0) {
dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
dump_stack();
}
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+ if (result < 0)
+ goto error_link_urb;
+
result = -ENOMEM;
xfer = kzalloc(sizeof(*xfer), gfp);
if (xfer == NULL)
@@ -1057,15 +1868,34 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
- queue_work(wusbd, &wa->xfer_work);
+ queue_work(wusbd, &wa->xfer_enqueue_work);
} else {
- wa_urb_enqueue_b(xfer);
+ result = wa_urb_enqueue_b(xfer);
+ if (result < 0) {
+ /*
+ * URB submit/enqueue failed. Clean up, return an
+ * error and do not run the callback. This avoids
+ * an infinite submit/complete loop.
+ */
+ dev_err(dev, "%s: URB enqueue failed: %d\n",
+ __func__, result);
+ wa_put(xfer->wa);
+ wa_xfer_put(xfer);
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+ return result;
+ }
}
return 0;
error_dequeued:
kfree(xfer);
error_kmalloc:
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+error_link_urb:
return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
@@ -1088,26 +1918,53 @@ EXPORT_SYMBOL_GPL(wa_urb_enqueue);
* asynch request] and then make sure we cancel each segment.
*
*/
-int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
+int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
{
unsigned long flags, flags2;
struct wa_xfer *xfer;
struct wa_seg *seg;
struct wa_rpipe *rpipe;
- unsigned cnt;
+ unsigned cnt, done = 0, xfer_abort_pending;
unsigned rpipe_ready = 0;
+ int result;
- xfer = urb->hcpriv;
- if (xfer == NULL) {
- /* NOthing setup yet enqueue will see urb->status !=
- * -EINPROGRESS (by hcd layer) and bail out with
- * error, no need to do completion
+ /* check if it is safe to unlink. */
+ spin_lock_irqsave(&wa->xfer_list_lock, flags);
+ result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
+ if ((result == 0) && urb->hcpriv) {
+ /*
+ * Get a xfer ref to prevent a race with wa_xfer_giveback
+ * cleaning up the xfer while we are working with it.
*/
- BUG_ON(urb->status == -EINPROGRESS);
- goto out;
+ wa_xfer_get(urb->hcpriv);
}
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+ if (result)
+ return result;
+
+ xfer = urb->hcpriv;
+ if (xfer == NULL)
+ return -ENOENT;
spin_lock_irqsave(&xfer->lock, flags);
+ pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
rpipe = xfer->ep->hcpriv;
+ if (rpipe == NULL) {
+ pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s",
+ __func__, xfer, wa_xfer_id(xfer),
+ "Probably already aborted.\n" );
+ result = -ENOENT;
+ goto out_unlock;
+ }
+ /*
+ * Check for done to avoid racing with wa_xfer_giveback and completing
+ * twice.
+ */
+ if (__wa_xfer_is_done(xfer)) {
+ pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
+ xfer, wa_xfer_id(xfer));
+ result = -ENOENT;
+ goto out_unlock;
+ }
/* Check the delayed list -> if there, release and complete */
spin_lock_irqsave(&wa->xfer_list_lock, flags2);
if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1116,9 +1973,16 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
if (xfer->seg == NULL) /* still hasn't reached */
goto out_unlock; /* setup(), enqueue_b() completes */
/* Ok, the xfer is in flight already, it's been setup and submitted.*/
- __wa_xfer_abort(xfer);
+ xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
+ /*
+ * grab the rpipe->seg_lock here to prevent racing with
+ * __wa_xfer_delayed_run.
+ */
+ spin_lock(&rpipe->seg_lock);
for (cnt = 0; cnt < xfer->segs; cnt++) {
seg = xfer->seg[cnt];
+ pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
+ __func__, wa_xfer_id(xfer), cnt, seg->status);
switch (seg->status) {
case WA_SEG_NOTREADY:
case WA_SEG_READY:
@@ -1127,50 +1991,68 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
WARN_ON(1);
break;
case WA_SEG_DELAYED:
+ /*
+ * delete from rpipe delayed list. If no segments on
+ * this xfer have been submitted, __wa_xfer_is_done will
+ * trigger a giveback below. Otherwise, the submitted
+ * segments will be completed in the DTI interrupt.
+ */
seg->status = WA_SEG_ABORTED;
- spin_lock_irqsave(&rpipe->seg_lock, flags2);
+ seg->result = -ENOENT;
list_del(&seg->list_node);
xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
- break;
- case WA_SEG_SUBMITTED:
- seg->status = WA_SEG_ABORTED;
- usb_unlink_urb(&seg->urb);
- if (xfer->is_inbound == 0)
- usb_unlink_urb(seg->dto_urb);
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- break;
- case WA_SEG_PENDING:
- seg->status = WA_SEG_ABORTED;
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- break;
- case WA_SEG_DTI_PENDING:
- usb_unlink_urb(wa->dti_urb);
- seg->status = WA_SEG_ABORTED;
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
break;
case WA_SEG_DONE:
case WA_SEG_ERROR:
case WA_SEG_ABORTED:
break;
+ /*
+ * The buf_in data for a segment in the
+ * WA_SEG_DTI_PENDING state is actively being read.
+ * Let wa_buf_in_cb handle it since it will be called
+ * and will increment xfer->segs_done. Cleaning up
+ * here could cause wa_buf_in_cb to access the xfer
+ * after it has been completed/freed.
+ */
+ case WA_SEG_DTI_PENDING:
+ break;
+ /*
+ * In the states below, the HWA device already knows
+ * about the transfer. If an abort request was sent,
+ * allow the HWA to process it and wait for the
+ * results. Otherwise, the DTI state and seg completed
+ * counts can get out of sync.
+ */
+ case WA_SEG_SUBMITTED:
+ case WA_SEG_PENDING:
+ /*
+ * Check if the abort was successfully sent. This could
+ * be false if the HWA has been removed but we haven't
+ * gotten the disconnect notification yet.
+ */
+ if (!xfer_abort_pending) {
+ seg->status = WA_SEG_ABORTED;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ xfer->segs_done++;
+ }
+ break;
}
}
+ spin_unlock(&rpipe->seg_lock);
xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
- __wa_xfer_is_done(xfer);
+ done = __wa_xfer_is_done(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
- wa_xfer_completion(xfer);
+ if (done)
+ wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
- return 0;
+ wa_xfer_put(xfer);
+ return result;
out_unlock:
spin_unlock_irqrestore(&xfer->lock, flags);
-out:
- return 0;
+ wa_xfer_put(xfer);
+ return result;
dequeue_delayed:
list_del_init(&xfer->list_node);
@@ -1178,6 +2060,7 @@ dequeue_delayed:
xfer->result = urb->status;
spin_unlock_irqrestore(&xfer->lock, flags);
wa_xfer_giveback(xfer);
+ wa_xfer_put(xfer);
usb_put_urb(urb); /* we got a ref in enqueue() */
return 0;
}
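The switch above encodes a per-segment cleanup policy for dequeue. A minimal standalone sketch of that decision, assuming the WA_SEG_* semantics described in the comments (the enum and helper below are illustrative, not driver code):

#include <stdbool.h>

/* illustrative mirror of the WA_SEG_* states handled above */
enum seg_demo { SEG_DELAYED, SEG_SUBMITTED, SEG_PENDING,
		SEG_DTI_PENDING, SEG_DONE };

/*
 * A dequeued segment is reclaimed locally only when the HWA can no
 * longer report on it: DELAYED segments were never sent, and
 * SUBMITTED/PENDING segments are reclaimed only if the abort request
 * could not be delivered. DTI_PENDING segments are always left for
 * wa_buf_in_cb() to finish.
 */
static bool cleanup_locally(enum seg_demo s, bool abort_pending)
{
	switch (s) {
	case SEG_DELAYED:
		return true;
	case SEG_SUBMITTED:
	case SEG_PENDING:
		return !abort_pending;
	default:
		return false;
	}
}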
@@ -1206,7 +2089,7 @@ static int wa_xfer_status_to_errno(u8 status)
[WA_XFER_STATUS_NOT_FOUND] = 0,
[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
[WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
- [WA_XFER_STATUS_ABORTED] = -EINTR,
+ [WA_XFER_STATUS_ABORTED] = -ENOENT,
[WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
[WA_XFER_INVALID_FORMAT] = EINVAL,
[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
@@ -1217,16 +2100,14 @@ static int wa_xfer_status_to_errno(u8 status)
if (status == 0)
return 0;
if (status >= ARRAY_SIZE(xlat)) {
- if (printk_ratelimit())
- printk(KERN_ERR "%s(): BUG? "
+ printk_ratelimited(KERN_ERR "%s(): BUG? "
"Unknown WA transfer status 0x%02x\n",
__func__, real_status);
return -EINVAL;
}
errno = xlat[status];
if (unlikely(errno > 0)) {
- if (printk_ratelimit())
- printk(KERN_ERR "%s(): BUG? "
+ printk_ratelimited(KERN_ERR "%s(): BUG? "
"Inconsistent WA status: 0x%02x\n",
__func__, real_status);
errno = -errno;
@@ -1235,24 +2116,178 @@ static int wa_xfer_status_to_errno(u8 status)
}
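Note the asymmetry the xlat table relies on: statuses the device may legitimately report are stored as negative errnos, while statuses it should never send are stored positive so the errno > 0 branch can log the inconsistency before negating. A compact sketch of the same pattern (values illustrative):

#include <errno.h>
#include <stdio.h>

static int xlat_demo(unsigned status)
{
	/* positive entries mark "device must never send this" */
	static const int xlat[] = { 0, -ENOMEM, -EILSEQ, EINVAL };
	int err;

	if (status >= sizeof(xlat) / sizeof(xlat[0]))
		return -EINVAL;			/* unknown status */
	err = xlat[status];
	if (err > 0) {
		printf("inconsistent status 0x%02x\n", status);
		err = -err;
	}
	return err;
}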
/*
+ * If a last segment flag and/or a transfer result error is encountered,
+ * no other segment transfer results will be returned from the device.
+ * Mark the remaining submitted or pending xfer segments as completed so
+ * that the xfer will complete cleanly.
+ *
+ * xfer->lock must be held.
+ */
+static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
+ int starting_index, enum wa_seg_status status)
+{
+ int index;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ for (index = starting_index; index < xfer->segs_submitted; index++) {
+ struct wa_seg *current_seg = xfer->seg[index];
+
+ BUG_ON(current_seg == NULL);
+
+ switch (current_seg->status) {
+ case WA_SEG_SUBMITTED:
+ case WA_SEG_PENDING:
+ case WA_SEG_DTI_PENDING:
+ rpipe_avail_inc(rpipe);
+ /*
+ * fall through: do not increment RPIPE avail for the
+ * WA_SEG_DELAYED case since it has not been submitted to the
+ * RPIPE, but do count it in segs_done.
+ */
+ case WA_SEG_DELAYED:
+ xfer->segs_done++;
+ current_seg->status = status;
+ break;
+ case WA_SEG_ABORTED:
+ break;
+ default:
+ WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
+ __func__, wa_xfer_id(xfer), index,
+ current_seg->status);
+ break;
+ }
+ }
+}
+
+/* Populate the given urb based on the current isoc transfer state. */
+static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
+ struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
+{
+ int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
+ int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
+ int next_frame_contiguous;
+ struct usb_iso_packet_descriptor *iso_frame;
+
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
+
+ /*
+ * If the current frame actual_length is contiguous with the next frame
+ * and actual_length is a multiple of the DTI endpoint max packet size,
+ * combine the current frame with the next frame in a single URB. This
+ * reduces the number of URBs that must be submitted in that case.
+ */
+ seg_index = seg->isoc_frame_index;
+ do {
+ next_frame_contiguous = 0;
+
+ iso_frame = &iso_frame_desc[urb_frame_index];
+ total_len += iso_frame->actual_length;
+ ++urb_frame_index;
+ ++seg_index;
+
+ if (seg_index < seg->isoc_frame_count) {
+ struct usb_iso_packet_descriptor *next_iso_frame;
+
+ next_iso_frame = &iso_frame_desc[urb_frame_index];
+
+ if ((iso_frame->offset + iso_frame->actual_length) ==
+ next_iso_frame->offset)
+ next_frame_contiguous = 1;
+ }
+ } while (next_frame_contiguous
+ && ((iso_frame->actual_length % dti_packet_size) == 0));
+
+ /* this should always be 0 before a resubmit. */
+ buf_in_urb->num_mapped_sgs = 0;
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
+ iso_frame_desc[urb_start_frame].offset;
+ buf_in_urb->transfer_buffer_length = total_len;
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ buf_in_urb->context = seg;
+
+ /* return the number of frames included in this URB. */
+ return seg_index - seg->isoc_frame_index;
+}
+
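A userspace sketch of the merge rule above, assuming struct frame stands in for usb_iso_packet_descriptor and mergeable_frames for the in-loop logic (neither exists in the driver): frames are combined while each one ends exactly where the next begins and is a whole number of DTI packets, since a short packet would terminate the bulk IN transfer early.

#include <stdio.h>

struct frame { unsigned offset, actual_length; };

static int mergeable_frames(const struct frame *f, int nframes,
			    int start, unsigned dti_packet_size)
{
	int i = start;

	/* keep merging while contiguous and packet-size aligned */
	while (i + 1 < nframes &&
	       f[i].offset + f[i].actual_length == f[i + 1].offset &&
	       f[i].actual_length % dti_packet_size == 0)
		i++;
	return i - start + 1;	/* frames covered by one read URB */
}

int main(void)
{
	const struct frame frames[] = {
		{ 0, 512 }, { 512, 512 }, { 1024, 100 }, { 2048, 512 },
	};

	/* frames 0..2 merge; the short frame 2 ends the run */
	printf("%d\n", mergeable_frames(frames, 4, 0, 512)); /* 3 */
	return 0;
}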
+/* Populate the given urb based on the current transfer state. */
+static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
+ unsigned int seg_idx, unsigned int bytes_transferred)
+{
+ int result = 0;
+ struct wa_seg *seg = xfer->seg[seg_idx];
+
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
+ /* this should always be 0 before a resubmit. */
+ buf_in_urb->num_mapped_sgs = 0;
+
+ if (xfer->is_dma) {
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+ + (seg_idx * xfer->seg_size);
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ } else {
+ /* do buffer or SG processing. */
+ buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
+
+ if (xfer->urb->transfer_buffer) {
+ buf_in_urb->transfer_buffer =
+ xfer->urb->transfer_buffer
+ + (seg_idx * xfer->seg_size);
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ } else {
+ /*
+ * allocate an SG list to store seg_size bytes and copy the
+ * subset of the xfer->urb->sg that matches the buffer subset
+ * we are about to read.
+ */
+ buf_in_urb->sg = wa_xfer_create_subset_sg(
+ xfer->urb->sg,
+ seg_idx * xfer->seg_size,
+ bytes_transferred,
+ &(buf_in_urb->num_sgs));
+
+ if (!(buf_in_urb->sg)) {
+ buf_in_urb->num_sgs = 0;
+ result = -ENOMEM;
+ }
+ buf_in_urb->transfer_buffer = NULL;
+ }
+ }
+ buf_in_urb->transfer_buffer_length = bytes_transferred;
+ buf_in_urb->context = seg;
+
+ return result;
+}
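wa_xfer_create_subset_sg() is not shown in this hunk; presumably it walks the urb's scatterlist, skips the bytes that precede the segment, and clones just the entries covering the bytes to be read. A userspace model of that walk over plain length-only chunks (all names here are made up):

#include <stdio.h>

/* length-only stand-in for a scatterlist entry */
struct chunk { unsigned len; };

/*
 * Walk the chunk list and report which pieces cover the byte range
 * [skip, skip + count).
 */
static void subset(const struct chunk *c, int n, unsigned skip,
		   unsigned count)
{
	int i;

	for (i = 0; i < n && count > 0; i++) {
		unsigned off, take;

		if (skip >= c[i].len) {		/* entirely before range */
			skip -= c[i].len;
			continue;
		}
		off = skip;
		skip = 0;
		take = c[i].len - off;
		if (take > count)
			take = count;
		count -= take;
		printf("chunk %d: offset %u, %u bytes\n", i, off, take);
	}
}

int main(void)
{
	const struct chunk list[] = { { 4096 }, { 4096 }, { 4096 } };

	subset(list, 3, 6000, 3000);	/* spans chunks 1 and 2 */
	return 0;
}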
+
+/*
* Process a xfer result completion message
*
- * inbound transfers: need to schedule a DTI read
+ * inbound transfers: need to schedule a buf_in_urb read
*
- * FIXME: this functio needs to be broken up in parts
+ * FIXME: this function needs to be broken up into parts
*/
-static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
+static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
+ struct wa_xfer_result *xfer_result)
{
int result;
struct device *dev = &wa->usb_iface->dev;
unsigned long flags;
- u8 seg_idx;
+ unsigned int seg_idx;
struct wa_seg *seg;
struct wa_rpipe *rpipe;
- struct wa_xfer_result *xfer_result = wa->xfer_result;
- u8 done = 0;
+ unsigned done = 0;
u8 usb_status;
unsigned rpipe_ready = 0;
+ unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
+ struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
spin_lock_irqsave(&xfer->lock, flags);
seg_idx = xfer_result->bTransferSegment & 0x7f;
@@ -1261,8 +2296,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
seg = xfer->seg[seg_idx];
rpipe = xfer->ep->hcpriv;
usb_status = xfer_result->bTransferStatus;
- dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
- xfer, seg_idx, usb_status, seg->status);
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
+ xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
if (seg->status == WA_SEG_ABORTED
|| seg->status == WA_SEG_ERROR) /* already handled */
goto segment_aborted;
@@ -1276,42 +2311,48 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
}
if (usb_status & 0x80) {
seg->result = wa_xfer_status_to_errno(usb_status);
- dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
- xfer, seg->index, usb_status);
+ dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
+ xfer, xfer->id, seg->index, usb_status);
+ seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
+ WA_SEG_ABORTED : WA_SEG_ERROR;
goto error_complete;
}
/* FIXME: we ignore warnings, tally them for stats */
if (usb_status & 0x40) /* Warning?... */
usb_status = 0; /* ... pass */
- if (xfer->is_inbound) { /* IN data phase: read to buffer */
+ /*
+ * If the last segment bit is set, complete the remaining segments.
+ * When the current segment is completed, either in wa_buf_in_cb for
+ * transfers with data or below for no data, the xfer will complete.
+ */
+ if (xfer_result->bTransferSegment & 0x80)
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
+ WA_SEG_DONE);
+ if (usb_pipeisoc(xfer->urb->pipe)
+ && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
+ /* set up WA state to read the isoc packet status next. */
+ wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
+ wa->dti_isoc_xfer_seg = seg_idx;
+ wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
+ } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
+ && (bytes_transferred > 0)) {
+ /* IN data phase: read to buffer */
seg->status = WA_SEG_DTI_PENDING;
- BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
- if (xfer->is_dma) {
- wa->buf_in_urb->transfer_dma =
- xfer->urb->transfer_dma
- + seg_idx * xfer->seg_size;
- wa->buf_in_urb->transfer_flags
- |= URB_NO_TRANSFER_DMA_MAP;
- } else {
- wa->buf_in_urb->transfer_buffer =
- xfer->urb->transfer_buffer
- + seg_idx * xfer->seg_size;
- wa->buf_in_urb->transfer_flags
- &= ~URB_NO_TRANSFER_DMA_MAP;
- }
- wa->buf_in_urb->transfer_buffer_length =
- le32_to_cpu(xfer_result->dwTransferLength);
- wa->buf_in_urb->context = seg;
- result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+ result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
+ bytes_transferred);
if (result < 0)
+ goto error_buf_in_populate;
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
goto error_submit_buf_in;
+ }
} else {
- /* OUT data phase, complete it -- */
- seg->status = WA_SEG_DONE;
- seg->result = le32_to_cpu(xfer_result->dwTransferLength);
- xfer->segs_done++;
+ /* OUT data phase or no data, complete it -- */
+ seg->result = bytes_transferred;
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -1330,22 +2371,44 @@ error_submit_buf_in:
dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
xfer, seg_idx, result);
seg->result = result;
-error_complete:
+ kfree(buf_in_urb->sg);
+ buf_in_urb->sg = NULL;
+error_buf_in_populate:
+ __wa_xfer_abort(xfer);
seg->status = WA_SEG_ERROR;
+error_complete:
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- __wa_xfer_abort(xfer);
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
done = __wa_xfer_is_done(xfer);
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
+ /*
+ * queue a work item to clear the STALL for control endpoints.
+ * Otherwise, let endpoint_reset take care of it.
+ */
+ if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
+ usb_endpoint_xfer_control(&xfer->ep->desc) &&
+ done) {
+
+ dev_info(dev, "Control EP stall. Queue delayed work.\n");
+ spin_lock(&wa->xfer_list_lock);
+ /* move xfer from xfer_list to xfer_errored_list. */
+ list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
+ spin_unlock(&wa->xfer_list_lock);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ queue_work(wusbd, &wa->xfer_error_work);
+ } else {
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+
return;
error_bad_seg:
spin_unlock_irqrestore(&xfer->lock, flags);
- wa_urb_dequeue(wa, xfer->urb);
+ wa_urb_dequeue(wa, xfer->urb, -ENOENT);
if (printk_ratelimit())
dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
@@ -1361,6 +2424,154 @@ segment_aborted:
}
/*
+ * Process an isochronous packet status message
+ *
+ * inbound transfers: need to schedule a buf_in_urb read
+ */
+static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
+{
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_xfer_packet_status_hwaiso *packet_status;
+ struct wa_xfer_packet_status_len_hwaiso *status_array;
+ struct wa_xfer *xfer;
+ unsigned long flags;
+ struct wa_seg *seg;
+ struct wa_rpipe *rpipe;
+ unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
+ unsigned first_frame_index = 0, rpipe_ready = 0;
+ int expected_size;
+
+ /* We have a xfer result buffer; check it */
+ dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
+ urb->actual_length, urb->transfer_buffer);
+ packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
+ if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
+ dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
+ packet_status->bPacketType);
+ goto error_parse_buffer;
+ }
+ xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
+ if (xfer == NULL) {
+ dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
+ wa->dti_isoc_xfer_in_progress);
+ goto error_parse_buffer;
+ }
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
+ goto error_bad_seg;
+ seg = xfer->seg[wa->dti_isoc_xfer_seg];
+ rpipe = xfer->ep->hcpriv;
+ expected_size = sizeof(*packet_status) +
+ (sizeof(packet_status->PacketStatus[0]) *
+ seg->isoc_frame_count);
+ if (urb->actual_length != expected_size) {
+ dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
+ urb->actual_length, expected_size);
+ goto error_bad_seg;
+ }
+ if (le16_to_cpu(packet_status->wLength) != expected_size) {
+ dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
+ le16_to_cpu(packet_status->wLength));
+ goto error_bad_seg;
+ }
+ /* write isoc packet status and lengths back to the xfer urb. */
+ status_array = packet_status->PacketStatus;
+ xfer->urb->start_frame =
+ wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
+ for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int xfer_frame_index =
+ seg->isoc_frame_offset + seg_index;
+
+ iso_frame_desc[xfer_frame_index].status =
+ wa_xfer_status_to_errno(
+ le16_to_cpu(status_array[seg_index].PacketStatus));
+ iso_frame_desc[xfer_frame_index].actual_length =
+ le16_to_cpu(status_array[seg_index].PacketLength);
+ /* track the number of frames successfully transferred. */
+ if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
+ /* save the starting frame index for buf_in_urb. */
+ if (!data_frame_count)
+ first_frame_index = seg_index;
+ ++data_frame_count;
+ }
+ }
+
+ if (xfer->is_inbound && data_frame_count) {
+ int result, total_frames_read = 0, urb_index = 0;
+ struct urb *buf_in_urb;
+
+ /* IN data phase: read to buffer */
+ seg->status = WA_SEG_DTI_PENDING;
+
+ /* start with the first frame with data. */
+ seg->isoc_frame_index = first_frame_index;
+ /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
+ do {
+ int urb_frame_index, urb_frame_count;
+ struct usb_iso_packet_descriptor *iso_frame_desc;
+
+ buf_in_urb = &(wa->buf_in_urbs[urb_index]);
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
+ buf_in_urb, xfer, seg);
+ /* advance frame index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ total_frames_read += urb_frame_count;
+
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+
+ /* skip 0-byte frames. */
+ urb_frame_index =
+ seg->isoc_frame_offset + seg->isoc_frame_index;
+ iso_frame_desc =
+ &(xfer->urb->iso_frame_desc[urb_frame_index]);
+ while ((seg->isoc_frame_index <
+ seg->isoc_frame_count) &&
+ (iso_frame_desc->actual_length == 0)) {
+ ++(seg->isoc_frame_index);
+ ++iso_frame_desc;
+ }
+ ++urb_index;
+
+ } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
+ && (seg->isoc_frame_index <
+ seg->isoc_frame_count));
+
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
+ dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
+ result);
+ wa_reset_all(wa);
+ } else if (data_frame_count > total_frames_read)
+ /* If we need to read more frames, set DTI busy. */
+ dti_busy = 1;
+ } else {
+ /* OUT transfer or no more IN data, complete it -- */
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (dti_busy)
+ wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
+ else
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
+ return dti_busy;
+
+error_bad_seg:
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_put(xfer);
+error_parse_buffer:
+ return dti_busy;
+}
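Both length checks above enforce the same invariant: the status message is one fixed header plus exactly one entry per isoc frame in the segment. Schematically, with the sizes as parameters rather than the real struct sizes:

/* a message passes only if urb->actual_length and the message's own
 * wLength both equal this value */
static int isoc_status_expected_size(int hdr_size, int entry_size,
				     int frame_count)
{
	return hdr_size + entry_size * frame_count;
}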
+
+/*
* Callback for the IN data phase
*
* If successful transition state; otherwise, take a note of the
@@ -1377,23 +2588,86 @@ static void wa_buf_in_cb(struct urb *urb)
struct wahc *wa;
struct device *dev;
struct wa_rpipe *rpipe;
- unsigned rpipe_ready;
+ unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
unsigned long flags;
+ int resubmit_dti = 0, active_buf_in_urbs;
u8 done = 0;
+ /* free the sg if it was used. */
+ kfree(urb->sg);
+ urb->sg = NULL;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ --(wa->active_buf_in_urbs);
+ active_buf_in_urbs = wa->active_buf_in_urbs;
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ int seg_index;
+
+ /*
+ * Find the next isoc frame with data and count how many
+ * frames with data remain.
+ */
+ seg_index = seg->isoc_frame_index;
+ while (seg_index < seg->isoc_frame_count) {
+ const int urb_frame_index =
+ seg->isoc_frame_offset + seg_index;
+
+ if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+ /* save the index of the next frame with data */
+ if (!isoc_data_frame_count)
+ seg->isoc_frame_index = seg_index;
+ ++isoc_data_frame_count;
+ }
+ ++seg_index;
+ }
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
switch (urb->status) {
case 0:
spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
- rpipe = xfer->ep->hcpriv;
- dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
- xfer, seg->index, (size_t)urb->actual_length);
- seg->status = WA_SEG_DONE;
- seg->result = urb->actual_length;
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+
+ seg->result += urb->actual_length;
+ if (isoc_data_frame_count > 0) {
+ int result, urb_frame_count;
+
+ /* submit a read URB for the next frame with data. */
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
+ xfer, seg);
+ /* advance index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
+ dev_err(dev, "DTI Error: Could not submit buf in URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
+ /*
+ * If we are in this callback and
+ * isoc_data_frame_count > 0, it means that the dti_urb
+ * submission was delayed in wa_dti_cb. Once
+ * we submit the last buf_in_urb, we can submit the
+ * delayed dti_urb.
+ */
+ resubmit_dti = (isoc_data_frame_count ==
+ urb_frame_count);
+ } else if (active_buf_in_urbs == 0) {
+ rpipe = xfer->ep->hcpriv;
+ dev_dbg(dev,
+ "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ seg->result);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_DONE);
+ }
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
@@ -1404,31 +2678,50 @@ static void wa_buf_in_cb(struct urb *urb)
case -ENOENT: /* as it was done by the one who unlinked us */
break;
default: /* Other errors ... */
+ /*
+ * Error on data buf read. Only resubmit DTI if it hasn't
+ * already been done by previously hitting this error or by a
+ * successful completion of the previous buf_in_urb.
+ */
+ resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
rpipe = xfer->ep->hcpriv;
if (printk_ratelimit())
- dev_err(dev, "xfer %p#%u: data in error %d\n",
- xfer, seg->index, urb->status);
+ dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
dev_err(dev, "DTO: URB max acceptable errors "
"exceeded, resetting device\n");
wa_reset_all(wa);
}
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- __wa_xfer_abort(xfer);
- done = __wa_xfer_is_done(xfer);
+ if (active_buf_in_urbs == 0)
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
+ else
+ __wa_xfer_abort(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+
+ if (resubmit_dti) {
+ int result;
+
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
+ }
}
/*
@@ -1457,57 +2750,65 @@ static void wa_buf_in_cb(struct urb *urb)
* We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
* errors) in the URBs.
*/
-static void wa_xfer_result_cb(struct urb *urb)
+static void wa_dti_cb(struct urb *urb)
{
- int result;
+ int result, dti_busy = 0;
struct wahc *wa = urb->context;
struct device *dev = &wa->usb_iface->dev;
- struct wa_xfer_result *xfer_result;
u32 xfer_id;
- struct wa_xfer *xfer;
u8 usb_status;
BUG_ON(wa->dti_urb != urb);
switch (wa->dti_urb->status) {
case 0:
- /* We have a xfer result buffer; check it */
- dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
- urb->actual_length, urb->transfer_buffer);
- if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
- dev_err(dev, "DTI Error: xfer result--bad size "
- "xfer result (%d bytes vs %zu needed)\n",
- urb->actual_length, sizeof(*xfer_result));
- break;
- }
- xfer_result = wa->xfer_result;
- if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
- dev_err(dev, "DTI Error: xfer result--"
- "bad header length %u\n",
- xfer_result->hdr.bLength);
- break;
- }
- if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
- dev_err(dev, "DTI Error: xfer result--"
- "bad header type 0x%02x\n",
- xfer_result->hdr.bNotifyType);
- break;
- }
- usb_status = xfer_result->bTransferStatus & 0x3f;
- if (usb_status == WA_XFER_STATUS_ABORTED
- || usb_status == WA_XFER_STATUS_NOT_FOUND)
- /* taken care of already */
- break;
- xfer_id = xfer_result->dwTransferID;
- xfer = wa_xfer_get_by_id(wa, xfer_id);
- if (xfer == NULL) {
- /* FIXME: transaction might have been cancelled */
- dev_err(dev, "DTI Error: xfer result--"
- "unknown xfer 0x%08x (status 0x%02x)\n",
- xfer_id, usb_status);
- break;
+ if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
+ struct wa_xfer_result *xfer_result;
+ struct wa_xfer *xfer;
+
+ /* We have a xfer result buffer; check it */
+ dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
+ urb->actual_length, urb->transfer_buffer);
+ if (urb->actual_length != sizeof(*xfer_result)) {
+ dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
+ urb->actual_length,
+ sizeof(*xfer_result));
+ break;
+ }
+ xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
+ if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
+ dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
+ xfer_result->hdr.bLength);
+ break;
+ }
+ if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
+ dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
+ xfer_result->hdr.bNotifyType);
+ break;
+ }
+ xfer_id = le32_to_cpu(xfer_result->dwTransferID);
+ usb_status = xfer_result->bTransferStatus & 0x3f;
+ if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
+ /* taken care of already */
+ dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
+ __func__, xfer_id,
+ xfer_result->bTransferSegment & 0x7f);
+ break;
+ }
+ xfer = wa_xfer_get_by_id(wa, xfer_id);
+ if (xfer == NULL) {
+ /* FIXME: transaction not found. */
+ dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
+ xfer_id, usb_status);
+ break;
+ }
+ wa_xfer_result_chew(wa, xfer, xfer_result);
+ wa_xfer_put(xfer);
+ } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
+ dti_busy = wa_process_iso_packet_status(wa, urb);
+ } else {
+ dev_err(dev, "DTI Error: unexpected EP state = %d\n",
+ wa->dti_state);
}
- wa_xfer_result_chew(wa, xfer);
- wa_xfer_put(xfer);
break;
case -ENOENT: /* (we killed the URB)...so, no broadcast */
case -ESHUTDOWN: /* going away! */
@@ -1526,18 +2827,69 @@ static void wa_xfer_result_cb(struct urb *urb)
dev_err(dev, "DTI: URB error %d\n", urb->status);
break;
}
- /* Resubmit the DTI URB */
- result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
- "resetting\n", result);
- wa_reset_all(wa);
+
+ /* Resubmit the DTI URB if we are not busy processing isoc in frames. */
+ if (!dti_busy) {
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
}
out:
return;
}
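Taken together, wa_dti_cb(), wa_process_iso_packet_status() and wa_buf_in_cb() drive a three-state reader on the DTI endpoint. A sketch of the successful-completion transitions (names mirror the WA_DTI_* states; the helper itself is illustrative):

/* names mirror the WA_DTI_* states used by wa->dti_state */
enum dti_demo { RESULT_PENDING, ISOC_STATUS_PENDING, BUF_IN_PENDING };

/* state after a successful completion in the current state */
static enum dti_demo dti_next(enum dti_demo cur, int isoc_packets,
			      int in_frames_left)
{
	switch (cur) {
	case RESULT_PENDING:
		/* an isoc result with packets defers to packet status */
		return isoc_packets > 0 ?
			ISOC_STATUS_PENDING : RESULT_PENDING;
	case ISOC_STATUS_PENDING:
		/* outstanding buf_in reads keep the DTI URB parked */
		return in_frames_left > 0 ?
			BUF_IN_PENDING : RESULT_PENDING;
	case BUF_IN_PENDING:
		/* the final wa_buf_in_cb() resubmits the DTI URB */
		return RESULT_PENDING;
	}
	return RESULT_PENDING;
}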
/*
+ * Initialize the DTI URB for reading transfer result notifications and also
+ * the buffer-in URBs, for reading transfer data. Then submit the DTI URB.
+ */
+int wa_dti_start(struct wahc *wa)
+{
+ const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
+ struct device *dev = &wa->usb_iface->dev;
+ int result = -ENOMEM, index;
+
+ if (wa->dti_urb != NULL) /* DTI URB already started */
+ goto out;
+
+ wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (wa->dti_urb == NULL) {
+ dev_err(dev, "Can't allocate DTI URB\n");
+ goto error_dti_urb_alloc;
+ }
+ usb_fill_bulk_urb(
+ wa->dti_urb, wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
+ wa->dti_buf, wa->dti_buf_size,
+ wa_dti_cb, wa);
+
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
+ usb_fill_bulk_urb(
+ &(wa->buf_in_urbs[index]), wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev,
+ 0x80 | dti_epd->bEndpointAddress),
+ NULL, 0, wa_buf_in_cb, wa);
+ }
+ result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
+ result);
+ goto error_dti_urb_submit;
+ }
+out:
+ return 0;
+
+error_dti_urb_submit:
+ usb_put_urb(wa->dti_urb);
+ wa->dti_urb = NULL;
+error_dti_urb_alloc:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wa_dti_start);
+/*
* Transfer complete notification
*
* Called from the notif.c code. We get a notification on EP2 saying
@@ -1548,18 +2900,13 @@ out:
* don't really set it up and start it until the first xfer complete
* notification arrives, which is what we do here.
*
- * Follow up in wa_xfer_result_cb(), as that's where the whole state
+ * Follow up in wa_dti_cb(), as that's where the whole state
* machine starts.
*
- * So here we just initialize the DTI URB for reading transfer result
- * notifications and also the buffer-in URB, for reading buffers. Then
- * we just submit the DTI URB.
- *
* @wa shall be referenced
*/
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
- int result;
struct device *dev = &wa->usb_iface->dev;
struct wa_notif_xfer *notif_xfer;
const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
@@ -1573,44 +2920,13 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
goto error;
}
- if (wa->dti_urb != NULL) /* DTI URB already started */
- goto out;
- wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->dti_urb == NULL) {
- dev_err(dev, "Can't allocate DTI URB\n");
- goto error_dti_urb_alloc;
- }
- usb_fill_bulk_urb(
- wa->dti_urb, wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
- wa->xfer_result, wa->xfer_result_size,
- wa_xfer_result_cb, wa);
+ /* attempt to start the DTI ep processing. */
+ if (wa_dti_start(wa) < 0)
+ goto error;
- wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->buf_in_urb == NULL) {
- dev_err(dev, "Can't allocate BUF-IN URB\n");
- goto error_buf_in_urb_alloc;
- }
- usb_fill_bulk_urb(
- wa->buf_in_urb, wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
- NULL, 0, wa_buf_in_cb, wa);
- result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
- "resetting\n", result);
- goto error_dti_urb_submit;
- }
-out:
return;
-error_dti_urb_submit:
- usb_put_urb(wa->buf_in_urb);
-error_buf_in_urb_alloc:
- usb_put_urb(wa->dti_urb);
- wa->dti_urb = NULL;
-error_dti_urb_alloc:
error:
wa_reset_all(wa);
}
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 2054d4ee977..3e1ba51d1a4 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -55,7 +55,8 @@ static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
* value of trust_timeout is jiffies.
*/
static ssize_t wusb_trust_timeout_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
@@ -75,12 +76,11 @@ static ssize_t wusb_trust_timeout_store(struct device *dev,
result = -EINVAL;
goto out;
}
- /* FIXME: maybe we should check for range validity? */
- wusbhc->trust_timeout = trust_timeout;
+ wusbhc->trust_timeout = min_t(unsigned, trust_timeout, 500);
cancel_delayed_work(&wusbhc->keep_alive_timer);
flush_workqueue(wusbd);
queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
- (trust_timeout * CONFIG_HZ)/1000/2);
+ msecs_to_jiffies(wusbhc->trust_timeout / 2));
out:
return result < 0 ? result : size;
}
@@ -174,13 +174,76 @@ static ssize_t wusb_phy_rate_store(struct device *dev,
wusbhc->phy_rate = phy_rate;
return size;
}
-static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, wusb_phy_rate_store);
+static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show,
+ wusb_phy_rate_store);
+
+static ssize_t wusb_dnts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+ return sprintf(buf, "num slots: %d\ninterval: %dms\n",
+ wusbhc->dnts_num_slots, wusbhc->dnts_interval);
+}
+
+static ssize_t wusb_dnts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ uint8_t num_slots, interval;
+ ssize_t result;
+
+ result = sscanf(buf, "%hhu %hhu", &num_slots, &interval);
+
+ if (result != 2)
+ return -EINVAL;
+
+ wusbhc->dnts_num_slots = num_slots;
+ wusbhc->dnts_interval = interval;
+
+ return size;
+}
+static DEVICE_ATTR(wusb_dnts, 0644, wusb_dnts_show, wusb_dnts_store);
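The attribute takes two space-separated values, slot count then interval. A tiny sketch of the same parse the store performs (the input string "4 8" is only an example):

#include <stdio.h>

/* same sscanf pattern as the store above: e.g. writing "4 8" would
 * select four DNTS slots at an 8 ms interval */
int main(void)
{
	unsigned char num_slots, interval;

	if (sscanf("4 8", "%hhu %hhu", &num_slots, &interval) != 2)
		return 1;
	printf("slots=%u interval=%ums\n", num_slots, interval);
	return 0;
}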
+
+static ssize_t wusb_retry_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+ return sprintf(buf, "%d\n", wusbhc->retry_count);
+}
+
+static ssize_t wusb_retry_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ uint8_t retry_count;
+ ssize_t result;
+
+ result = sscanf(buf, "%hhu", &retry_count);
+
+ if (result != 1)
+ return -EINVAL;
+
+ wusbhc->retry_count = min_t(uint8_t, retry_count,
+ WUSB_RETRY_COUNT_MAX);
+
+ return size;
+}
+static DEVICE_ATTR(wusb_retry_count, 0644, wusb_retry_count_show,
+ wusb_retry_count_store);
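With WUSB_RETRY_COUNT_INFINITE == 0 and WUSB_RETRY_COUNT_MAX == 15, the store above must clamp downwards (min_t); max_t would silently raise every written value to 15 and make the infinite setting unreachable. The clamp with the constants inlined:

static unsigned char clamp_retry_count(unsigned char v)
{
	return v > 15 ? 15 : v;	/* 0 keeps its "infinite" meaning */
}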
/* Group all the WUSBHC attributes */
static struct attribute *wusbhc_attrs[] = {
&dev_attr_wusb_trust_timeout.attr,
&dev_attr_wusb_chid.attr,
&dev_attr_wusb_phy_rate.attr,
+ &dev_attr_wusb_dnts.attr,
+ &dev_attr_wusb_retry_count.attr,
NULL,
};
@@ -206,8 +269,12 @@ int wusbhc_create(struct wusbhc *wusbhc)
{
int result = 0;
+ /* set defaults. These can be overwritten using sysfs attributes. */
wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
wusbhc->phy_rate = UWB_PHY_RATE_INVALID - 1;
+ wusbhc->dnts_num_slots = 4;
+ wusbhc->dnts_interval = 2;
+ wusbhc->retry_count = WUSB_RETRY_COUNT_INFINITE;
mutex_init(&wusbhc->mutex);
result = wusbhc_mmcie_create(wusbhc);
@@ -257,17 +324,12 @@ int wusbhc_b_create(struct wusbhc *wusbhc)
result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
if (result < 0) {
- dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result);
+ dev_err(dev, "Cannot register WUSBHC attributes: %d\n",
+ result);
goto error_create_attr_group;
}
- result = wusbhc_pal_register(wusbhc);
- if (result < 0)
- goto error_pal_register;
return 0;
-
-error_pal_register:
- sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
error_create_attr_group:
return result;
}
@@ -320,7 +382,7 @@ u8 wusb_cluster_id_get(void)
u8 id;
spin_lock(&wusb_cluster_ids_lock);
id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS);
- if (id > CLUSTER_IDS) {
+ if (id >= CLUSTER_IDS) {
id = 0;
goto out;
}
@@ -361,13 +423,14 @@ EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
* - After a successful transfer, update the trust timeout timestamp
* for the WUSB device.
*
- * - [WUSB] sections 4.13 and 7.5.1 specifies the stop retrasmittion
+ * - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission
* condition for the WCONNECTACK_IE is that the host has observed
* the associated device responding to a control transfer.
*/
void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
{
- struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
+ struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc,
+ urb->dev);
if (status == 0 && wusb_dev) {
wusb_dev->entry_ts = jiffies;
@@ -393,7 +456,8 @@ EXPORT_SYMBOL_GPL(wusbhc_giveback_urb);
*/
void wusbhc_reset_all(struct wusbhc *wusbhc)
{
- uwb_rc_reset_all(wusbhc->uwb_rc);
+ if (wusbhc->uwb_rc)
+ uwb_rc_reset_all(wusbhc->uwb_rc);
}
EXPORT_SYMBOL_GPL(wusbhc_reset_all);
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 3d94c4247f4..2384add4537 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -69,6 +69,8 @@
* zone 0.
*/
#define WUSB_CHANNEL_STOP_DELAY_MS 8
+#define WUSB_RETRY_COUNT_MAX 15
+#define WUSB_RETRY_COUNT_INFINITE 0
/**
* Wireless USB device
@@ -95,6 +97,7 @@ struct wusb_dev {
struct kref refcnt;
struct wusbhc *wusbhc;
struct list_head cack_node; /* Connect-Ack list */
+ struct list_head rekey_node; /* GTK rekey list */
u8 port_idx;
u8 addr;
u8 beacon_type:4;
@@ -105,8 +108,6 @@ struct wusb_dev {
struct usb_wireless_cap_descriptor *wusb_cap_descr;
struct uwb_mas_bm availability;
struct work_struct devconnect_acked_work;
- struct urb *set_gtk_urb;
- struct usb_ctrlrequest *set_gtk_req;
struct usb_device *usb_dev;
};
@@ -132,7 +133,7 @@ static inline void wusb_dev_put(struct wusb_dev *wusb_dev)
}
/**
- * Wireless USB Host Controlller root hub "fake" ports
+ * Wireless USB Host Controller root hub "fake" ports
* (state and device information)
*
* Wireless USB is wireless, so there are no ports; but we
@@ -163,7 +164,7 @@ struct wusb_port {
* functions/operations that only deal with general Wireless USB HC
* issues use this data type to refer to the host.
*
- * @usb_hcd Instantiation of a USB host controller
+ * @usb_hcd Instantiation of a USB host controller
* (initialized by upper layer [HWA=HC or WHCI].
*
* @dev Device that implements this; initialized by the
@@ -195,7 +196,7 @@ struct wusb_port {
* @ports_max Number of simultaneous device connections (fake
* ports) this HC will take. Read-only.
*
- * @port Array of port status for each fake root port. Guaranteed to
+ * @port Array of port status for each fake root port. Guaranteed to
* always be the same length during device existence
* [this allows for some unlocked but referenced reading].
*
@@ -231,7 +232,7 @@ struct wusb_port {
*
* Most of the times when you need to use it, it will be non-NULL,
* so there is no real need to check for it (wusb_dev will
- * dissapear before usb_dev).
+ * disappear before usb_dev).
*
* - The following fields need to be filled out before calling
* wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}.
@@ -252,6 +253,9 @@ struct wusbhc {
unsigned trust_timeout; /* in jiffies */
struct wusb_ckhdid chid;
uint8_t phy_rate;
+ uint8_t dnts_num_slots;
+ uint8_t dnts_interval;
+ uint8_t retry_count;
struct wuie_host_info *wuie_host_info;
struct mutex mutex; /* locks everything else */
@@ -291,8 +295,7 @@ struct wusbhc {
} __attribute__((packed)) gtk;
u8 gtk_index;
u32 gtk_tkid;
- struct work_struct gtk_rekey_done_work;
- int pending_set_gtks;
+ struct work_struct gtk_rekey_work;
struct usb_encryption_descriptor *ccm1_etd;
};
@@ -326,7 +329,8 @@ void wusbhc_pal_unregister(struct wusbhc *wusbhc);
* This is a safe assumption as @usb_dev->bus is referenced all the
* time during the @usb_dev life cycle.
*/
-static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
+static inline
+struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
{
struct usb_hcd *usb_hcd;
usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);
@@ -399,8 +403,6 @@ extern void wusbhc_rh_destroy(struct wusbhc *);
extern int wusbhc_rh_status_data(struct usb_hcd *, char *);
extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16);
-extern int wusbhc_rh_suspend(struct usb_hcd *);
-extern int wusbhc_rh_resume(struct usb_hcd *);
extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned);
/* MMC handling */