Diffstat (limited to 'drivers/usb/wusbcore')
-rw-r--r--  drivers/usb/wusbcore/cbaf.c          29
-rw-r--r--  drivers/usb/wusbcore/crypto.c         2
-rw-r--r--  drivers/usb/wusbcore/devconnect.c    82
-rw-r--r--  drivers/usb/wusbcore/mmc.c           11
-rw-r--r--  drivers/usb/wusbcore/pal.c            1
-rw-r--r--  drivers/usb/wusbcore/reservation.c    1
-rw-r--r--  drivers/usb/wusbcore/security.c     136
-rw-r--r--  drivers/usb/wusbcore/wa-hc.c         20
-rw-r--r--  drivers/usb/wusbcore/wa-hc.h         83
-rw-r--r--  drivers/usb/wusbcore/wa-nep.c        10
-rw-r--r--  drivers/usb/wusbcore/wa-rpipe.c      47
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c     1927
-rw-r--r--  drivers/usb/wusbcore/wusbhc.c        17
-rw-r--r--  drivers/usb/wusbcore/wusbhc.h        13
14 files changed, 1771 insertions, 608 deletions
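[Editor's note] A recurring change in the hunks below is the replacement of hard-coded 1000 ms control-message timeouts (flagged "FIXME: arbitrary") with the standard USB_CTRL_GET_TIMEOUT/USB_CTRL_SET_TIMEOUT constants from <linux/usb.h>. A minimal sketch of the pattern, assuming a hypothetical class request code and an example buffer (not values from this driver):

#include <linux/usb.h>

/* Sketch: issue a class-specific GET on interface 0 using the standard
 * control-message timeout instead of an arbitrary 1000 ms value.  The
 * request code 0x01 and the buffer are placeholders. */
static int example_class_get(struct usb_device *udev, void *buf, u16 len)
{
	return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			0x01,	/* hypothetical class request */
			USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			0, 0, buf, len,
			USB_CTRL_GET_TIMEOUT);	/* 5 s kernel default */
}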
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c index 7f78f300f8f..da1b872918b 100644 --- a/drivers/usb/wusbcore/cbaf.c +++ b/drivers/usb/wusbcore/cbaf.c @@ -144,7 +144,7 @@ static int cbaf_check(struct cbaf *cbaf)  		CBAF_REQ_GET_ASSOCIATION_INFORMATION,  		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,  		0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, -		cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */); +		cbaf->buffer, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);  	if (result < 0) {  		dev_err(dev, "Cannot get available association types: %d\n",  			result); @@ -184,7 +184,7 @@ static int cbaf_check(struct cbaf *cbaf)  		assoc_request = itr;  		if (top - itr < sizeof(*assoc_request)) { -			dev_err(dev, "Not enough data to decode associaton " +			dev_err(dev, "Not enough data to decode association "  				"request (%zu vs %zu bytes needed)\n",  				top - itr, sizeof(*assoc_request));  			break; @@ -208,9 +208,9 @@ static int cbaf_check(struct cbaf *cbaf)  				ar_name = "ASSOCIATE";  				ar_assoc = 1;  				break; -			}; +			}  			break; -		}; +		}  		dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "  			 "(%zu bytes): %s\n", @@ -235,7 +235,7 @@ static int cbaf_check(struct cbaf *cbaf)  static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {  	.AssociationTypeId_hdr    = WUSB_AR_AssociationTypeId, -	.AssociationTypeId    	  = cpu_to_le16(AR_TYPE_WUSB), +	.AssociationTypeId	  = cpu_to_le16(AR_TYPE_WUSB),  	.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,  	.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),  	.CHID_hdr                 = WUSB_AR_CHID, @@ -260,12 +260,13 @@ static int cbaf_send_host_info(struct cbaf *cbaf)  	hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);  	hi_size = sizeof(*hi) + name_len; -	return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0), +	return usb_control_msg(cbaf->usb_dev, +			usb_sndctrlpipe(cbaf->usb_dev, 0),  			CBAF_REQ_SET_ASSOCIATION_RESPONSE,  			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,  			0x0101,  			cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, -			hi, hi_size, 1000 /* FIXME: arbitrary */); +			hi, hi_size, USB_CTRL_SET_TIMEOUT);  }  /* @@ -288,9 +289,10 @@ static int cbaf_cdid_get(struct cbaf *cbaf)  		CBAF_REQ_GET_ASSOCIATION_REQUEST,  		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,  		0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, -		di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */); +		di, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);  	if (result < 0) { -		dev_err(dev, "Cannot request device information: %d\n", result); +		dev_err(dev, "Cannot request device information: %d\n", +			result);  		return result;  	} @@ -491,11 +493,11 @@ static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);  static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {  	.AssociationTypeId_hdr    = WUSB_AR_AssociationTypeId, -	.AssociationTypeId    	  = cpu_to_le16(AR_TYPE_WUSB), +	.AssociationTypeId	  = cpu_to_le16(AR_TYPE_WUSB),  	.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,  	.AssociationSubTypeId     = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),  	.Length_hdr               = WUSB_AR_Length, -	.Length               	  = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)), +	.Length		= cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),  	.ConnectionContext_hdr    = WUSB_AR_ConnectionContext,  	.BandGroups_hdr           = WUSB_AR_BandGroups,  }; @@ -536,7 +538,7 @@ static int 
cbaf_cc_upload(struct cbaf *cbaf)  		CBAF_REQ_SET_ASSOCIATION_RESPONSE,  		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,  		0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, -		ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */); +		ccd, sizeof(*ccd), USB_CTRL_SET_TIMEOUT);  	return result;  } @@ -623,6 +625,8 @@ static int cbaf_probe(struct usb_interface *iface,  error_create_group:  error_check: +	usb_put_intf(iface); +	usb_put_dev(cbaf->usb_dev);  	kfree(cbaf->buffer);  error_kmalloc_buffer:  	kfree(cbaf); @@ -637,6 +641,7 @@ static void cbaf_disconnect(struct usb_interface *iface)  	sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);  	usb_set_intfdata(iface, NULL);  	usb_put_intf(iface); +	usb_put_dev(cbaf->usb_dev);  	kfree(cbaf->buffer);  	/* paranoia: clean up crypto keys */  	kzfree(cbaf); diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c index 7e4bf95f8f7..9a95b2dc6d1 100644 --- a/drivers/usb/wusbcore/crypto.c +++ b/drivers/usb/wusbcore/crypto.c @@ -87,7 +87,7 @@ struct aes_ccm_block {   * B1 contains l(a), the MAC header, the encryption offset and padding.   *   * If EO is nonzero, additional blocks are built from payload bytes - * until EO is exahusted (FIXME: padding to 16 bytes, I guess). The + * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The   * padding is not xmitted.   */ diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index 33a12788f9c..0677139c606 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c @@ -97,18 +97,12 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work);  static void wusb_dev_free(struct wusb_dev *wusb_dev)  { -	if (wusb_dev) { -		kfree(wusb_dev->set_gtk_req); -		usb_free_urb(wusb_dev->set_gtk_urb); -		kfree(wusb_dev); -	} +	kfree(wusb_dev);  }  static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)  {  	struct wusb_dev *wusb_dev; -	struct urb *urb; -	struct usb_ctrlrequest *req;  	wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);  	if (wusb_dev == NULL) @@ -118,22 +112,6 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)  	INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); -	urb = usb_alloc_urb(0, GFP_KERNEL); -	if (urb == NULL) -		goto err; -	wusb_dev->set_gtk_urb = urb; - -	req = kmalloc(sizeof(*req), GFP_KERNEL); -	if (req == NULL) -		goto err; -	wusb_dev->set_gtk_req = req; - -	req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE; -	req->bRequest = USB_REQ_SET_DESCRIPTOR; -	req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index); -	req->wIndex = 0; -	req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength); -  	return wusb_dev;  err:  	wusb_dev_free(wusb_dev); @@ -287,9 +265,9 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work)   * Addresses: because WUSB hosts have no downstream hubs, we can do a   *            1:1 mapping between 'port number' and device   *            address. This simplifies many things, as during this - *            initial connect phase the USB stack has no knoledge of + *            initial connect phase the USB stack has no knowledge of   *            the device and hasn't assigned an address yet--we know - *            USB's choose_address() will use the same euristics we + *            USB's choose_address() will use the same heuristics we   *            use here, so we can assume which address will be assigned.   
*   *            USB stack always assigns address 1 to the root hub, so @@ -306,7 +284,7 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,  	struct device *dev = wusbhc->dev;  	struct wusb_dev *wusb_dev;  	struct wusb_port *port; -	unsigned idx, devnum; +	unsigned idx;  	mutex_lock(&wusbhc->mutex); @@ -334,8 +312,6 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,  		goto error_unlock;  	} -	devnum = idx + 2; -  	/* Make sure we are using no crypto on that "virtual port" */  	wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0); @@ -411,9 +387,6 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,  /*   * Refresh the list of keep alives to emit in the MMC   * - * Some devices don't respond to keep alives unless they've been - * authenticated, so skip unauthenticated devices. - *   * We only publish the first four devices that have a coming timeout   * condition. Then when we are done processing those, we go for the   * next ones. We ignore the ones that have timed out already (they'll @@ -448,7 +421,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)  		if (wusb_dev == NULL)  			continue; -		if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated) +		if (wusb_dev->usb_dev == NULL)  			continue;  		if (time_after(jiffies, wusb_dev->entry_ts + tt)) { @@ -524,11 +497,19 @@ static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)   *   * @wusbhc shall be referenced and unlocked   */ -static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr)  { +	struct wusb_dev *wusb_dev; +  	mutex_lock(&wusbhc->mutex); -	wusb_dev->entry_ts = jiffies; -	__wusbhc_keep_alive(wusbhc); +	wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); +	if (wusb_dev == NULL) { +		dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n", +			srcaddr); +	} else { +		wusb_dev->entry_ts = jiffies; +		__wusbhc_keep_alive(wusbhc); +	}  	mutex_unlock(&wusbhc->mutex);  } @@ -582,14 +563,22 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,   *   * @wusbhc shall be referenced and unlocked   */ -static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr)  {  	struct device *dev = wusbhc->dev; - -	dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr); +	struct wusb_dev *wusb_dev;  	mutex_lock(&wusbhc->mutex); -	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx)); +	wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); +	if (wusb_dev == NULL) { +		dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n", +			srcaddr); +	} else { +		dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", +			wusb_dev->addr); +		__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, +			wusb_dev->port_idx)); +	}  	mutex_unlock(&wusbhc->mutex);  } @@ -611,30 +600,21 @@ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,  		      struct wusb_dn_hdr *dn_hdr, size_t size)  {  	struct device *dev = wusbhc->dev; -	struct wusb_dev *wusb_dev;  	if (size < sizeof(struct wusb_dn_hdr)) {  		dev_err(dev, "DN data shorter than DN header (%d < %d)\n",  			(int)size, (int)sizeof(struct wusb_dn_hdr));  		return;  	} - -	wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); -	if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) { -		dev_dbg(dev, "ignoring DN %d from 
unconnected device %02x\n", -			dn_hdr->bType, srcaddr); -		return; -	} -  	switch (dn_hdr->bType) {  	case WUSB_DN_CONNECT:  		wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);  		break;  	case WUSB_DN_ALIVE: -		wusbhc_handle_dn_alive(wusbhc, wusb_dev); +		wusbhc_handle_dn_alive(wusbhc, srcaddr);  		break;  	case WUSB_DN_DISCONNECT: -		wusbhc_handle_dn_disconnect(wusbhc, wusb_dev); +		wusbhc_handle_dn_disconnect(wusbhc, srcaddr);  		break;  	case WUSB_DN_MASAVAILCHANGED:  	case WUSB_DN_RWAKE: @@ -973,7 +953,7 @@ int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,  	default:  		WARN_ON(1);  		result = NOTIFY_BAD; -	}; +	}  	return result;  } diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c index b71760c8d3a..3f485df9622 100644 --- a/drivers/usb/wusbcore/mmc.c +++ b/drivers/usb/wusbcore/mmc.c @@ -206,13 +206,15 @@ int wusbhc_start(struct wusbhc *wusbhc)  	result = wusbhc_devconnect_start(wusbhc);  	if (result < 0) { -		dev_err(dev, "error enabling device connections: %d\n", result); +		dev_err(dev, "error enabling device connections: %d\n", +			result);  		goto error_devconnect_start;  	}  	result = wusbhc_sec_start(wusbhc);  	if (result < 0) { -		dev_err(dev, "error starting security in the HC: %d\n", result); +		dev_err(dev, "error starting security in the HC: %d\n", +			result);  		goto error_sec_start;  	} @@ -284,7 +286,8 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)  		wusbhc->uwb_rc = uwb_rc_get_by_grandpa(wusbhc->dev->parent);  		if (wusbhc->uwb_rc == NULL) {  			result = -ENODEV; -			dev_err(wusbhc->dev, "Cannot get associated UWB Host Controller\n"); +			dev_err(wusbhc->dev, +				"Cannot get associated UWB Host Controller\n");  			goto error_rc_get;  		} @@ -298,7 +301,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)  	if (chid)  		result = uwb_radio_start(&wusbhc->pal); -	else +	else if (wusbhc->uwb_rc)  		uwb_radio_stop(&wusbhc->pal);  	return result; diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c index 59e100c2eb5..090f27371a8 100644 --- a/drivers/usb/wusbcore/pal.c +++ b/drivers/usb/wusbcore/pal.c @@ -22,6 +22,7 @@ static void wusbhc_channel_changed(struct uwb_pal *pal, int channel)  {  	struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal); +	dev_dbg(wusbhc->dev, "%s: channel = %d\n", __func__, channel);  	if (channel < 0)  		wusbhc_stop(wusbhc);  	else diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c index ead79f79392..d5efd0f07d2 100644 --- a/drivers/usb/wusbcore/reservation.c +++ b/drivers/usb/wusbcore/reservation.c @@ -51,6 +51,7 @@ static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)  	struct uwb_mas_bm mas;  	char buf[72]; +	dev_dbg(dev, "%s: state = %d\n", __func__, rsv->state);  	switch (rsv->state) {  	case UWB_RSV_STATE_O_ESTABLISHED:  		uwb_rsv_get_usable_mas(rsv, &mas); diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c index dd88441c8f7..95be9953cd4 100644 --- a/drivers/usb/wusbcore/security.c +++ b/drivers/usb/wusbcore/security.c @@ -29,19 +29,17 @@  #include <linux/export.h>  #include "wusbhc.h" -static void wusbhc_set_gtk_callback(struct urb *urb); -static void wusbhc_gtk_rekey_done_work(struct work_struct *work); +static void wusbhc_gtk_rekey_work(struct work_struct *work);  int wusbhc_sec_create(struct wusbhc *wusbhc)  { -	wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data); +	wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + +		
sizeof(wusbhc->gtk.data);  	wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;  	wusbhc->gtk.descr.bReserved = 0; +	wusbhc->gtk_index = 0; -	wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, -					   WUSB_KEY_INDEX_ORIGINATOR_HOST); - -	INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work); +	INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work);  	return 0;  } @@ -59,7 +57,7 @@ void wusbhc_sec_destroy(struct wusbhc *wusbhc)   * @wusb_dev: the device whose PTK the TKID is for   *            (or NULL for a TKID for a GTK)   * - * The generated TKID consist of two parts: the device's authenicated + * The generated TKID consists of two parts: the device's authenticated   * address (or 0 or a GTK); and an incrementing number.  This ensures   * that TKIDs cannot be shared between devices and by the time the   * incrementing number wraps around the older TKIDs will no longer be @@ -113,7 +111,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)  	wusbhc_generate_gtk(wusbhc);  	result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, -				 &wusbhc->gtk.descr.bKeyData, key_size); +				&wusbhc->gtk.descr.bKeyData, key_size);  	if (result < 0)  		dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",  			result); @@ -129,7 +127,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc)   */  void wusbhc_sec_stop(struct wusbhc *wusbhc)  { -	cancel_work_sync(&wusbhc->gtk_rekey_done_work); +	cancel_work_sync(&wusbhc->gtk_rekey_work);  } @@ -141,7 +139,7 @@ const char *wusb_et_name(u8 x)  	case USB_ENC_TYPE_WIRED:	return "wired";  	case USB_ENC_TYPE_CCM_1:	return "CCM-1";  	case USB_ENC_TYPE_RSA_1:	return "RSA-1"; -	default: 			return "unknown"; +	default:			return "unknown";  	}  }  EXPORT_SYMBOL_GPL(wusb_et_name); @@ -168,7 +166,7 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)  	result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),  			USB_REQ_SET_ENCRYPTION,  			USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, -			value, 0, NULL, 0, 1000 /* FIXME: arbitrary */); +			value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);  	if (result < 0)  		dev_err(dev, "Can't set device's WUSB encryption to "  			"%s (value %d): %d\n", @@ -185,14 +183,16 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)  static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)  {  	struct usb_device *usb_dev = wusb_dev->usb_dev; +	u8 key_index = wusb_key_index(wusbhc->gtk_index, +		WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST);  	return usb_control_msg(  		usb_dev, usb_sndctrlpipe(usb_dev, 0),  		USB_REQ_SET_DESCRIPTOR,  		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, -		USB_DT_KEY << 8 | wusbhc->gtk_index, 0, +		USB_DT_KEY << 8 | key_index, 0,  		&wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, -		1000); +		USB_CTRL_SET_TIMEOUT);  } @@ -223,7 +223,8 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,  	secd_size = le16_to_cpu(secd->wTotalLength);  	new_secd = krealloc(secd, secd_size, GFP_KERNEL);  	if (new_secd == NULL) { -		dev_err(dev, "Can't allocate space for security descriptors\n"); +		dev_err(dev, +			"Can't allocate space for security descriptors\n");  		goto out;  	}  	secd = new_secd; @@ -302,8 +303,9 @@ int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)  	/* Set address 0 */  	result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), -				 USB_REQ_SET_ADDRESS, 0, -				 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */); +			USB_REQ_SET_ADDRESS, +			USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, 
+			 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);  	if (result < 0) {  		dev_err(dev, "auth failed: can't set address 0: %d\n",  			result); @@ -317,9 +319,10 @@ int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)  	/* Set new (authenticated) address. */  	result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), -				 USB_REQ_SET_ADDRESS, 0, -				 new_address, 0, NULL, 0, -				 1000 /* FIXME: arbitrary */); +			USB_REQ_SET_ADDRESS, +			USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, +			new_address, 0, NULL, 0, +			USB_CTRL_SET_TIMEOUT);  	if (result < 0) {  		dev_err(dev, "auth failed: can't set address %u: %d\n",  			new_address, result); @@ -376,13 +379,13 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,  	hs[0].bReserved = 0;  	memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));  	get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce)); -	memset(hs[0].MIC, 0, sizeof(hs[0].MIC));	/* Per WUSB1.0[T7-22] */ +	memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */  	result = usb_control_msg(  		usb_dev, usb_sndctrlpipe(usb_dev, 0),  		USB_REQ_SET_HANDSHAKE,  		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, -		1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */); +		1, 0, &hs[0], sizeof(hs[0]), USB_CTRL_SET_TIMEOUT);  	if (result < 0) {  		dev_err(dev, "Handshake1: request failed: %d\n", result);  		goto error_hs1; @@ -393,7 +396,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,  		usb_dev, usb_rcvctrlpipe(usb_dev, 0),  		USB_REQ_GET_HANDSHAKE,  		USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, -		2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */); +		2, 0, &hs[1], sizeof(hs[1]), USB_CTRL_GET_TIMEOUT);  	if (result < 0) {  		dev_err(dev, "Handshake2: request failed: %d\n", result);  		goto error_hs2; @@ -423,7 +426,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,  	}  	/* Setup the CCM nonce */ -	memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn));	/* Per WUSB1.0[6.5.2] */ +	memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */  	memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));  	ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;  	ccm_n.dest_addr.data[0] = wusb_dev->addr; @@ -470,7 +473,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,  		usb_dev, usb_sndctrlpipe(usb_dev, 0),  		USB_REQ_SET_HANDSHAKE,  		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, -		3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */); +		3, 0, &hs[2], sizeof(hs[2]), USB_CTRL_SET_TIMEOUT);  	if (result < 0) {  		dev_err(dev, "Handshake3: request failed: %d\n", result);  		goto error_hs3; @@ -520,24 +523,57 @@ error_kzalloc:   * Once all connected and authenticated devices have received the new   * GTK, switch the host to using it.   */ -static void wusbhc_gtk_rekey_done_work(struct work_struct *work) +static void wusbhc_gtk_rekey_work(struct work_struct *work)  { -	struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work); +	struct wusbhc *wusbhc = container_of(work, +					struct wusbhc, gtk_rekey_work);  	size_t key_size = sizeof(wusbhc->gtk.data); +	int port_idx; +	struct wusb_dev *wusb_dev, *wusb_dev_next; +	LIST_HEAD(rekey_list);  	mutex_lock(&wusbhc->mutex); +	/* generate the new key */ +	wusbhc_generate_gtk(wusbhc); +	/* roll the gtk index. 
*/ +	wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1); +	/* +	 * Save all connected devices on a list while holding wusbhc->mutex and +	 * take a reference to each one.  Then submit the set key request to +	 * them after releasing the lock in order to avoid a deadlock. +	 */ +	for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) { +		wusb_dev = wusbhc->port[port_idx].wusb_dev; +		if (!wusb_dev || !wusb_dev->usb_dev +			|| !wusb_dev->usb_dev->authenticated) +			continue; -	if (--wusbhc->pending_set_gtks == 0) -		wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); - +		wusb_dev_get(wusb_dev); +		list_add_tail(&wusb_dev->rekey_node, &rekey_list); +	}  	mutex_unlock(&wusbhc->mutex); -} -static void wusbhc_set_gtk_callback(struct urb *urb) -{ -	struct wusbhc *wusbhc = urb->context; +	/* Submit the rekey requests without holding wusbhc->mutex. */ +	list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list, +		rekey_node) { +		list_del_init(&wusb_dev->rekey_node); +		dev_dbg(&wusb_dev->usb_dev->dev, +			"%s: rekey device at port %d\n", +			__func__, wusb_dev->port_idx); + +		if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) { +			dev_err(&wusb_dev->usb_dev->dev, +				"%s: rekey device at port %d failed\n", +				__func__, wusb_dev->port_idx); +		} +		wusb_dev_put(wusb_dev); +	} -	queue_work(wusbd, &wusbhc->gtk_rekey_done_work); +	/* Switch the host controller to use the new GTK. */ +	mutex_lock(&wusbhc->mutex); +	wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, +		&wusbhc->gtk.descr.bKeyData, key_size); +	mutex_unlock(&wusbhc->mutex);  }  /** @@ -553,26 +589,12 @@ static void wusbhc_set_gtk_callback(struct urb *urb)   */  void wusbhc_gtk_rekey(struct wusbhc *wusbhc)  { -	static const size_t key_size = sizeof(wusbhc->gtk.data); -	int p; - -	wusbhc_generate_gtk(wusbhc); - -	for (p = 0; p < wusbhc->ports_max; p++) { -		struct wusb_dev *wusb_dev; - -		wusb_dev = wusbhc->port[p].wusb_dev; -		if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated) -			continue; - -		usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev, -				     usb_sndctrlpipe(wusb_dev->usb_dev, 0), -				     (void *)wusb_dev->set_gtk_req, -				     &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, -				     wusbhc_set_gtk_callback, wusbhc); -		if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0) -			wusbhc->pending_set_gtks++; -	} -	if (wusbhc->pending_set_gtks == 0) -		wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); +	/* +	 * We need to submit a URB to the downstream WUSB devices in order to +	 * change the group key.  This can't be done while holding the +	 * wusbhc->mutex since that is also taken in the urb_enqueue routine +	 * and will cause a deadlock.  Instead, queue a work item to do +	 * it when the lock is not held +	 */ +	queue_work(wusbd, &wusbhc->gtk_rekey_work);  } diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c index a09b65ebd9b..252c7bd9218 100644 --- a/drivers/usb/wusbcore/wa-hc.c +++ b/drivers/usb/wusbcore/wa-hc.c @@ -33,7 +33,8 @@   * wa->usb_dev and wa->usb_iface initialized and refcounted,   * wa->wa_descr initialized.   
*/ -int wa_create(struct wahc *wa, struct usb_interface *iface) +int wa_create(struct wahc *wa, struct usb_interface *iface, +	kernel_ulong_t quirks)  {  	int result;  	struct device *dev = &iface->dev; @@ -41,14 +42,15 @@ int wa_create(struct wahc *wa, struct usb_interface *iface)  	result = wa_rpipes_create(wa);  	if (result < 0)  		goto error_rpipes_create; +	wa->quirks = quirks;  	/* Fill up Data Transfer EP pointers */  	wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;  	wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc; -	wa->xfer_result_size = usb_endpoint_maxp(wa->dti_epd); -	wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL); -	if (wa->xfer_result == NULL) { +	wa->dti_buf_size = usb_endpoint_maxp(wa->dti_epd); +	wa->dti_buf = kmalloc(wa->dti_buf_size, GFP_KERNEL); +	if (wa->dti_buf == NULL) {  		result = -ENOMEM; -		goto error_xfer_result_alloc; +		goto error_dti_buf_alloc;  	}  	result = wa_nep_create(wa, iface);  	if (result < 0) { @@ -59,8 +61,8 @@ int wa_create(struct wahc *wa, struct usb_interface *iface)  	return 0;  error_nep_create: -	kfree(wa->xfer_result); -error_xfer_result_alloc: +	kfree(wa->dti_buf); +error_dti_buf_alloc:  	wa_rpipes_destroy(wa);  error_rpipes_create:  	return result; @@ -73,10 +75,8 @@ void __wa_destroy(struct wahc *wa)  	if (wa->dti_urb) {  		usb_kill_urb(wa->dti_urb);  		usb_put_urb(wa->dti_urb); -		usb_kill_urb(wa->buf_in_urb); -		usb_put_urb(wa->buf_in_urb);  	} -	kfree(wa->xfer_result); +	kfree(wa->dti_buf);  	wa_nep_destroy(wa);  	wa_rpipes_destroy(wa);  } diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h index cf250c21e94..f2a8d29e17b 100644 --- a/drivers/usb/wusbcore/wa-hc.h +++ b/drivers/usb/wusbcore/wa-hc.h @@ -36,7 +36,7 @@   *   *  hcd        glue with the USB API Host Controller Interface API.   * - *  nep        Notification EndPoint managent: collect notifications + *  nep        Notification EndPoint management: collect notifications   *             and queue them with the workqueue daemon.   *   *             Handle notifications as coming from the NEP. Sends them @@ -117,11 +117,38 @@ struct wa_rpipe {  	struct wahc *wa;  	spinlock_t seg_lock;  	struct list_head seg_list; +	struct list_head list_node;  	atomic_t segs_available;  	u8 buffer[1];	/* For reads/writes on USB */  }; +enum wa_dti_state { +	WA_DTI_TRANSFER_RESULT_PENDING, +	WA_DTI_ISOC_PACKET_STATUS_PENDING, +	WA_DTI_BUF_IN_DATA_PENDING +}; + +enum wa_quirks { +	/* +	 * The Alereon HWA expects the data frames in isochronous transfer +	 * requests to be concatenated and not sent as separate packets. +	 */ +	WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC	= 0x01, +	/* +	 * The Alereon HWA can be instructed to not send transfer notifications +	 * as an optimization. +	 */ +	WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS	= 0x02, +}; + +enum wa_vendor_specific_requests { +	WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C, +	WA_REQ_ALEREON_FEATURE_SET = 0x01, +	WA_REQ_ALEREON_FEATURE_CLEAR = 0x00, +}; + +#define WA_MAX_BUF_IN_URBS	4  /**   * Instance of a HWA Host Controller   * @@ -130,7 +157,7 @@ struct wa_rpipe {   *   * @wa_descr  Can be accessed without locking because it is in   *            the same area where the device descriptors were - *            read, so it is guaranteed to exist umodified while + *            read, so it is guaranteed to exist unmodified while   *            the device exists.   *   *            Endianess has been converted to CPU's. 
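[Editor's note] The new wa->quirks field is a kernel_ulong_t precisely so it can be fed straight from usb_device_id.driver_info by the probing driver. A hedged sketch of that wiring, under the assumption of a hypothetical ID table and allocation helper (the VID/PID pair and example_alloc_wahc() are placeholders, not the driver's):

/* Hypothetical helper; the real allocation lives in the HWA probe path. */
static struct wahc *example_alloc_wahc(struct usb_interface *iface);

static const struct usb_device_id example_id_table[] = {
	/* placeholder VID/PID; driver_info carries the quirk bits */
	{ USB_DEVICE(0x13dc, 0x1234),
	  .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC },
	{ },
};

static int example_probe(struct usb_interface *iface,
			 const struct usb_device_id *id)
{
	struct wahc *wa = example_alloc_wahc(iface);

	if (!wa)
		return -ENOMEM;
	/* pass the matched entry's quirk bits through to wa_create(). */
	return wa_create(wa, iface, id->driver_info);
}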
@@ -153,8 +180,8 @@ struct wa_rpipe {   *                       submitted from an atomic context).   *   * FIXME: this needs to be layered up: a wusbhc layer (for sharing - *        comonalities with WHCI), a wa layer (for sharing - *        comonalities with DWA-RC). + *        commonalities with WHCI), a wa layer (for sharing + *        commonalities with DWA-RC).   */  struct wahc {  	struct usb_device *usb_dev; @@ -178,14 +205,28 @@ struct wahc {  	u16 rpipes;  	unsigned long *rpipe_bm;	/* rpipe usage bitmap */ -	spinlock_t rpipe_bm_lock;	/* protect rpipe_bm */ +	struct list_head rpipe_delayed_list;	/* delayed RPIPES. */ +	spinlock_t rpipe_lock;	/* protect rpipe_bm and delayed list */  	struct mutex rpipe_mutex;	/* assigning resources to endpoints */ +	/* +	 * dti_state is used to track the state of the dti_urb. When dti_state +	 * is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and +	 * dti_isoc_xfer_seg identify which xfer the incoming isoc packet +	 * status refers to. +	 */ +	enum wa_dti_state dti_state; +	u32 dti_isoc_xfer_in_progress; +	u8  dti_isoc_xfer_seg;  	struct urb *dti_urb;		/* URB for reading xfer results */ -	struct urb *buf_in_urb;		/* URB for reading data in */ +					/* URBs for reading data in */ +	struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS]; +	int active_buf_in_urbs;		/* number of buf_in_urbs active. */  	struct edc dti_edc;		/* DTI error density counter */ -	struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */ -	size_t xfer_result_size; +	void *dti_buf; +	size_t dti_buf_size; + +	unsigned long dto_in_use;	/* protect dto endoint serialization */  	s32 status;			/* For reading status */ @@ -200,11 +241,15 @@ struct wahc {  	struct work_struct xfer_enqueue_work;  	struct work_struct xfer_error_work;  	atomic_t xfer_id_count; + +	kernel_ulong_t	quirks;  }; -extern int wa_create(struct wahc *wa, struct usb_interface *iface); +extern int wa_create(struct wahc *wa, struct usb_interface *iface, +	kernel_ulong_t);  extern void __wa_destroy(struct wahc *wa); +extern int wa_dti_start(struct wahc *wa);  void wa_reset_all(struct wahc *wa); @@ -239,14 +284,18 @@ static inline void wa_nep_disarm(struct wahc *wa)  /* RPipes */  static inline void wa_rpipe_init(struct wahc *wa)  { -	spin_lock_init(&wa->rpipe_bm_lock); +	INIT_LIST_HEAD(&wa->rpipe_delayed_list); +	spin_lock_init(&wa->rpipe_lock);  	mutex_init(&wa->rpipe_mutex);  }  static inline void wa_init(struct wahc *wa)  { +	int index; +  	edc_init(&wa->nep_edc);  	atomic_set(&wa->notifs_queued, 0); +	wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;  	wa_rpipe_init(wa);  	edc_init(&wa->dti_edc);  	INIT_LIST_HEAD(&wa->xfer_list); @@ -255,7 +304,12 @@ static inline void wa_init(struct wahc *wa)  	spin_lock_init(&wa->xfer_list_lock);  	INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);  	INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run); +	wa->dto_in_use = 0;  	atomic_set(&wa->xfer_id_count, 1); +	/* init the buf in URBs */ +	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) +		usb_init_urb(&(wa->buf_in_urbs[index])); +	wa->active_buf_in_urbs = 0;  }  /** @@ -300,7 +354,7 @@ static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)  /* Transferring data */  extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,  			  struct urb *, gfp_t); -extern int wa_urb_dequeue(struct wahc *, struct urb *); +extern int wa_urb_dequeue(struct wahc *, struct urb *, int);  extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *); @@ -313,7 +367,7 @@ extern void 
wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);   *        it...no RC specific function is called...unless I miss   *        something.   * - * FIXME: has to go away in favour of an 'struct' hcd based sollution + * FIXME: has to go away in favour of a 'struct' hcd based solution   */  static inline struct wahc *wa_get(struct wahc *wa)  { @@ -334,7 +388,7 @@ static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)  			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,  			feature,  			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, -			NULL, 0, 1000 /* FIXME: arbitrary */); +			NULL, 0, USB_CTRL_SET_TIMEOUT);  } @@ -368,8 +422,7 @@ s32 __wa_get_status(struct wahc *wa)  		USB_REQ_GET_STATUS,  		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,  		0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, -		&wa->status, sizeof(wa->status), -		1000 /* FIXME: arbitrary */); +		&wa->status, sizeof(wa->status), USB_CTRL_GET_TIMEOUT);  	if (result >= 0)  		result = wa->status;  	return result; diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c index ada4e087062..60a10d21947 100644 --- a/drivers/usb/wusbcore/wa-nep.c +++ b/drivers/usb/wusbcore/wa-nep.c @@ -69,8 +69,8 @@ struct wa_notif_work {   * [the wuswad daemon, basically]   *   * @_nw:	Pointer to a descriptor which has the pointer to the - * 		@wa, the size of the buffer and the work queue - * 		structure (so we can free all when done). + *		@wa, the size of the buffer and the work queue + *		structure (so we can free all when done).   * @returns     0 if ok, < 0 errno code on error.   *   * All notifications follow the same format; they need to start with a @@ -93,7 +93,8 @@ static void wa_notif_dispatch(struct work_struct *ws)  {  	void *itr;  	u8 missing = 0; -	struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work); +	struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, +						work);  	struct wahc *wa = nw->wa;  	struct wa_notif_hdr *notif_hdr;  	size_t size; @@ -271,7 +272,8 @@ int wa_nep_create(struct wahc *wa, struct usb_interface *iface)  	wa->nep_buffer_size = 1024;  	wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);  	if (wa->nep_buffer == NULL) { -		dev_err(dev, "Unable to allocate notification's read buffer\n"); +		dev_err(dev, +			"Unable to allocate notification's read buffer\n");  		goto error_nep_buffer;  	}  	wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL); diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c index fd4f1ce6256..a80c5d284b5 100644 --- a/drivers/usb/wusbcore/wa-rpipe.c +++ b/drivers/usb/wusbcore/wa-rpipe.c @@ -57,7 +57,6 @@   *  urb->dev->devnum, to make sure that we always have the right   *  destination address.   
*/ -#include <linux/init.h>  #include <linux/atomic.h>  #include <linux/bitmap.h>  #include <linux/slab.h> @@ -80,7 +79,7 @@ static int __rpipe_get_descr(struct wahc *wa,  		USB_REQ_GET_DESCRIPTOR,  		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,  		USB_DT_RPIPE<<8, index, descr, sizeof(*descr), -		1000 /* FIXME: arbitrary */); +		USB_CTRL_GET_TIMEOUT);  	if (result < 0) {  		dev_err(dev, "rpipe %u: get descriptor failed: %d\n",  			index, (int)result); @@ -118,7 +117,7 @@ static int __rpipe_set_descr(struct wahc *wa,  		USB_REQ_SET_DESCRIPTOR,  		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,  		USB_DT_RPIPE<<8, index, descr, sizeof(*descr), -		HZ / 10); +		USB_CTRL_SET_TIMEOUT);  	if (result < 0) {  		dev_err(dev, "rpipe %u: set descriptor failed: %d\n",  			index, (int)result); @@ -143,17 +142,18 @@ static void rpipe_init(struct wa_rpipe *rpipe)  	kref_init(&rpipe->refcnt);  	spin_lock_init(&rpipe->seg_lock);  	INIT_LIST_HEAD(&rpipe->seg_list); +	INIT_LIST_HEAD(&rpipe->list_node);  }  static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)  {  	unsigned long flags; -	spin_lock_irqsave(&wa->rpipe_bm_lock, flags); +	spin_lock_irqsave(&wa->rpipe_lock, flags);  	rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);  	if (rpipe_idx < wa->rpipes)  		set_bit(rpipe_idx, wa->rpipe_bm); -	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags); +	spin_unlock_irqrestore(&wa->rpipe_lock, flags);  	return rpipe_idx;  } @@ -162,9 +162,9 @@ static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)  {  	unsigned long flags; -	spin_lock_irqsave(&wa->rpipe_bm_lock, flags); +	spin_lock_irqsave(&wa->rpipe_lock, flags);  	clear_bit(rpipe_idx, wa->rpipe_bm); -	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags); +	spin_unlock_irqrestore(&wa->rpipe_lock, flags);  }  void rpipe_destroy(struct kref *_rpipe) @@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(rpipe_destroy);  /*   * Locate an idle rpipe, create an structure for it and return it   * - * @wa 	  is referenced and unlocked + * @wa	  is referenced and unlocked   * @crs   enum rpipe_attr, required endpoint characteristics   *   * The rpipe can be used only sequentially (not in parallel). @@ -236,7 +236,7 @@ static int __rpipe_reset(struct wahc *wa, unsigned index)  		wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),  		USB_REQ_RPIPE_RESET,  		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, -		0, index, NULL, 0, 1000 /* FIXME: arbitrary */); +		0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);  	if (result < 0)  		dev_err(dev, "rpipe %u: reset failed: %d\n",  			index, result); @@ -298,7 +298,7 @@ static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(  			break;  		}  		itr += hdr->bLength; -		itr_size -= hdr->bDescriptorType; +		itr_size -= hdr->bLength;  	}  out:  	return epcd; @@ -307,7 +307,7 @@ out:  /*   * Aim an rpipe to its device & endpoint destination   * - * Make sure we change the address to unauthenticathed if the device + * Make sure we change the address to unauthenticated if the device   * is WUSB and it is not authenticated.   */  static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa, @@ -328,12 +328,16 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,  	}  	unauth = usb_dev->wusb && !usb_dev->authenticated ? 
0x80 : 0;  	__rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex)); -	atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests)); +	atomic_set(&rpipe->segs_available, +		le16_to_cpu(rpipe->descr.wRequests));  	/* FIXME: block allocation system; request with queuing and timeout */  	/* FIXME: compute so seg_size > ep->maxpktsize */  	rpipe->descr.wBlocks = cpu_to_le16(16);		/* given */  	/* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */ -	rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize); +	if (usb_endpoint_xfer_isoc(&ep->desc)) +		rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize; +	else +		rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;  	rpipe->descr.hwa_bMaxBurst = max(min_t(unsigned int,  				epcd->bMaxBurst, 16U), 1U); @@ -361,8 +365,10 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,  			epcd->bMaxSequence, 32U), 2U);  	rpipe->descr.bMaxDataSequence = epcd_max_sequence - 1;  	rpipe->descr.bInterval = ep->desc.bInterval; -	/* FIXME: bOverTheAirInterval */ -	rpipe->descr.bOverTheAirInterval = 0;	/* 0 if not isoc */ +	if (usb_endpoint_xfer_isoc(&ep->desc)) +		rpipe->descr.bOverTheAirInterval = epcd->bOverTheAirInterval; +	else +		rpipe->descr.bOverTheAirInterval = 0;	/* 0 if not isoc */  	/* FIXME: xmit power & preamble blah blah */  	rpipe->descr.bmAttribute = (ep->desc.bmAttributes &  					USB_ENDPOINT_XFERTYPE_MASK); @@ -477,7 +483,7 @@ error:   */  int wa_rpipes_create(struct wahc *wa)  { -	wa->rpipes = wa->wa_descr->wNumRPipes; +	wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);  	wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),  			       GFP_KERNEL);  	if (wa->rpipe_bm == NULL) @@ -518,10 +524,10 @@ void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)  		u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);  		usb_control_msg( -			wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), +			wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),  			USB_REQ_RPIPE_ABORT,  			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, -			0, index, NULL, 0, 1000 /* FIXME: arbitrary */); +			0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);  		rpipe_put(rpipe);  	}  	mutex_unlock(&wa->rpipe_mutex); @@ -539,12 +545,11 @@ void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)  		u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);  		usb_control_msg( -			wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), +			wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),  			USB_REQ_CLEAR_FEATURE,  			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, -			RPIPE_STALL, index, NULL, 0, 1000); +			RPIPE_STALL, index, NULL, 0, USB_CTRL_SET_TIMEOUT);  	}  	mutex_unlock(&wa->rpipe_mutex);  }  EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled); - diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 6ad02f57c36..3e2e4ed2015 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -79,7 +79,6 @@   *     availability of the different required components (blocks,   *     rpipes, segment slots, etc), we go scheduling them. Painful.   */ -#include <linux/init.h>  #include <linux/spinlock.h>  #include <linux/slab.h>  #include <linux/hash.h> @@ -91,7 +90,8 @@  #include "wusbhc.h"  enum { -	WA_SEGS_MAX = 255, +	/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. 
*/ +	WA_SEGS_MAX = 128,  };  enum wa_seg_status { @@ -107,6 +107,7 @@ enum wa_seg_status {  };  static void wa_xfer_delayed_run(struct wa_rpipe *); +static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);  /*   * Life cycle governed by 'struct urb' (the refcount of the struct is @@ -114,24 +115,29 @@ static void wa_xfer_delayed_run(struct wa_rpipe *);   * struct).   */  struct wa_seg { -	struct urb urb; -	struct urb *dto_urb;		/* for data output? */ +	struct urb tr_urb;		/* transfer request urb. */ +	struct urb *isoc_pack_desc_urb;	/* for isoc packet descriptor. */ +	struct urb *dto_urb;		/* for data output. */  	struct list_head list_node;	/* for rpipe->req_list */  	struct wa_xfer *xfer;		/* out xfer */  	u8 index;			/* which segment we are */ +	int isoc_frame_count;	/* number of isoc frames in this segment. */ +	int isoc_frame_offset;	/* starting frame offset in the xfer URB. */ +	/* Isoc frame that the current transfer buffer corresponds to. */ +	int isoc_frame_index; +	int isoc_size;	/* size of all isoc frames sent by this seg. */  	enum wa_seg_status status;  	ssize_t result;			/* bytes xfered or error */  	struct wa_xfer_hdr xfer_hdr; -	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */  };  static inline void wa_seg_init(struct wa_seg *seg)  { -	usb_init_urb(&seg->urb); +	usb_init_urb(&seg->tr_urb);  	/* set the remaining memory to 0. */ -	memset(((void *)seg) + sizeof(seg->urb), 0, -		sizeof(*seg) - sizeof(seg->urb)); +	memset(((void *)seg) + sizeof(seg->tr_urb), 0, +		sizeof(*seg) - sizeof(seg->tr_urb));  }  /* @@ -159,6 +165,11 @@ struct wa_xfer {  	struct wusb_dev *wusb_dev;	/* for activity timestamps */  }; +static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer, +	struct wa_seg *seg, int curr_iso_frame); +static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, +		int starting_index, enum wa_seg_status status); +  static inline void wa_xfer_init(struct wa_xfer *xfer)  {  	kref_init(&xfer->refcnt); @@ -169,7 +180,7 @@ static inline void wa_xfer_init(struct wa_xfer *xfer)  /*   * Destroy a transfer structure   * - * Note that freeing xfer->seg[cnt]->urb will free the containing + * Note that freeing xfer->seg[cnt]->tr_urb will free the containing   * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.   */  static void wa_xfer_destroy(struct kref *_xfer) @@ -178,9 +189,17 @@ static void wa_xfer_destroy(struct kref *_xfer)  	if (xfer->seg) {  		unsigned cnt;  		for (cnt = 0; cnt < xfer->segs; cnt++) { -			usb_free_urb(xfer->seg[cnt]->dto_urb); -			usb_free_urb(&xfer->seg[cnt]->urb); +			struct wa_seg *seg = xfer->seg[cnt]; +			if (seg) { +				usb_free_urb(seg->isoc_pack_desc_urb); +				if (seg->dto_urb) { +					kfree(seg->dto_urb->sg); +					usb_free_urb(seg->dto_urb); +				} +				usb_free_urb(&seg->tr_urb); +			}  		} +		kfree(xfer->seg);  	}  	kfree(xfer);  } @@ -196,6 +215,59 @@ static void wa_xfer_put(struct wa_xfer *xfer)  }  /* + * Try to get exclusive access to the DTO endpoint resource.  Return true + * if successful. + */ +static inline int __wa_dto_try_get(struct wahc *wa) +{ +	return (test_and_set_bit(0, &wa->dto_in_use) == 0); +} + +/* Release the DTO endpoint resource. */ +static inline void __wa_dto_put(struct wahc *wa) +{ +	clear_bit_unlock(0, &wa->dto_in_use); +} + +/* Service RPIPEs that are waiting on the DTO resource. 
*/ +static void wa_check_for_delayed_rpipes(struct wahc *wa) +{ +	unsigned long flags; +	int dto_waiting = 0; +	struct wa_rpipe *rpipe; + +	spin_lock_irqsave(&wa->rpipe_lock, flags); +	while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) { +		rpipe = list_first_entry(&wa->rpipe_delayed_list, +				struct wa_rpipe, list_node); +		__wa_xfer_delayed_run(rpipe, &dto_waiting); +		/* remove this RPIPE from the list if it is not waiting. */ +		if (!dto_waiting) { +			pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n", +				__func__, +				le16_to_cpu(rpipe->descr.wRPipeIndex)); +			list_del_init(&rpipe->list_node); +		} +	} +	spin_unlock_irqrestore(&wa->rpipe_lock, flags); +} + +/* add this RPIPE to the end of the delayed RPIPE list. */ +static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe) +{ +	unsigned long flags; + +	spin_lock_irqsave(&wa->rpipe_lock, flags); +	/* add rpipe to the list if it is not already on it. */ +	if (list_empty(&rpipe->list_node)) { +		pr_debug("%s: adding RPIPE %d to the delayed list.\n", +			__func__, le16_to_cpu(rpipe->descr.wRPipeIndex)); +		list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list); +	} +	spin_unlock_irqrestore(&wa->rpipe_lock, flags); +} + +/*   * xfer is referenced   *   * xfer->lock has to be unlocked @@ -211,6 +283,7 @@ static void wa_xfer_giveback(struct wa_xfer *xfer)  	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);  	list_del_init(&xfer->list_node); +	usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);  	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);  	/* FIXME: segmentation broken -- kills DWA */  	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); @@ -232,6 +305,31 @@ static void wa_xfer_completion(struct wa_xfer *xfer)  }  /* + * Initialize a transfer's ID + * + * We need to use a sequential number; if we use the pointer or the + * hash of the pointer, it can repeat over sequential transfers and + * then it will confuse the HWA....wonder why in hell they put a 32 + * bit handle in there then. + */ +static void wa_xfer_id_init(struct wa_xfer *xfer) +{ +	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); +} + +/* Return the xfer's ID. */ +static inline u32 wa_xfer_id(struct wa_xfer *xfer) +{ +	return xfer->id; +} + +/* Return the xfer's ID in transport format (little endian). 
*/ +static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer) +{ +	return cpu_to_le32(xfer->id); +} + +/*   * If transfer is done, wrap it up and return true   *   * xfer->lock has to be locked @@ -253,33 +351,37 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)  		switch (seg->status) {  		case WA_SEG_DONE:  			if (found_short && seg->result > 0) { -				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n", -					xfer, cnt, seg->result); +				dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n", +					xfer, wa_xfer_id(xfer), cnt, +					seg->result);  				urb->status = -EINVAL;  				goto out;  			}  			urb->actual_length += seg->result; -			if (seg->result < xfer->seg_size +			if (!(usb_pipeisoc(xfer->urb->pipe)) +				&& seg->result < xfer->seg_size  			    && cnt != xfer->segs-1)  				found_short = 1; -			dev_dbg(dev, "xfer %p#%u: DONE short %d " +			dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "  				"result %zu urb->actual_length %d\n", -				xfer, seg->index, found_short, seg->result, -				urb->actual_length); +				xfer, wa_xfer_id(xfer), seg->index, found_short, +				seg->result, urb->actual_length);  			break;  		case WA_SEG_ERROR:  			xfer->result = seg->result; -			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n", -				xfer, seg->index, seg->result); +			dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n", +				xfer, wa_xfer_id(xfer), seg->index, seg->result, +				seg->result);  			goto out;  		case WA_SEG_ABORTED: -			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n", -				xfer, seg->index, urb->status); -			xfer->result = urb->status; +			xfer->result = seg->result; +			dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n", +				xfer, wa_xfer_id(xfer), seg->index, seg->result, +				seg->result);  			goto out;  		default: -			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n", -				 xfer, cnt, seg->status); +			dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n", +				 xfer, wa_xfer_id(xfer), cnt, seg->status);  			xfer->result = -EINVAL;  			goto out;  		} @@ -290,26 +392,21 @@ out:  }  /* - * Initialize a transfer's ID + * Mark the given segment as done.  Return true if this completes the xfer. + * This should only be called for segs that have been submitted to an RPIPE. + * Delayed segs are not marked as submitted so they do not need to be marked + * as done when cleaning up.   * - * We need to use a sequential number; if we use the pointer or the - * hash of the pointer, it can repeat over sequential transfers and - * then it will confuse the HWA....wonder why in hell they put a 32 - * bit handle in there then. + * xfer->lock has to be locked   */ -static void wa_xfer_id_init(struct wa_xfer *xfer) +static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer, +	struct wa_seg *seg, enum wa_seg_status status)  { -	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); -} +	seg->status = status; +	xfer->segs_done++; -/* - * Return the xfer's ID associated with xfer - * - * Need to generate a - */ -static u32 wa_xfer_id(struct wa_xfer *xfer) -{ -	return xfer->id; +	/* check for done. */ +	return __wa_xfer_is_done(xfer);  }  /* @@ -339,12 +436,51 @@ out:  struct wa_xfer_abort_buffer {  	struct urb urb; +	struct wahc *wa;  	struct wa_xfer_abort cmd;  };  static void __wa_xfer_abort_cb(struct urb *urb)  {  	struct wa_xfer_abort_buffer *b = urb->context; +	struct wahc *wa = b->wa; + +	/* +	 * If the abort request URB failed, then the HWA did not get the abort +	 * command.  
Forcibly clean up the xfer without waiting for a Transfer +	 * Result from the HWA. +	 */ +	if (urb->status < 0) { +		struct wa_xfer *xfer; +		struct device *dev = &wa->usb_iface->dev; + +		xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID)); +		dev_err(dev, "%s: Transfer Abort request failed. result: %d\n", +			__func__, urb->status); +		if (xfer) { +			unsigned long flags; +			int done; +			struct wa_rpipe *rpipe = xfer->ep->hcpriv; + +			dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n", +				__func__, xfer, wa_xfer_id(xfer)); +			spin_lock_irqsave(&xfer->lock, flags); +			/* mark all segs as aborted. */ +			wa_complete_remaining_xfer_segs(xfer, 0, +				WA_SEG_ABORTED); +			done = __wa_xfer_is_done(xfer); +			spin_unlock_irqrestore(&xfer->lock, flags); +			if (done) +				wa_xfer_completion(xfer); +			wa_xfer_delayed_run(rpipe); +			wa_xfer_put(xfer); +		} else { +			dev_err(dev, "%s: xfer ID 0x%08X already gone.\n", +				 __func__, le32_to_cpu(b->cmd.dwTransferID)); +		} +	} + +	wa_put(wa);	/* taken in __wa_xfer_abort */  	usb_put_urb(&b->urb);  } @@ -356,15 +492,11 @@ static void __wa_xfer_abort_cb(struct urb *urb)   *   * The callback (see above) does nothing but freeing up the data by   * putting the URB. Because the URB is allocated at the head of the - * struct, the whole space we allocated is kfreed. - * - * We'll get an 'aborted transaction' xfer result on DTI, that'll - * politely ignore because at this point the transaction has been - * marked as aborted already. + * struct, the whole space we allocated is kfreed. *   */ -static void __wa_xfer_abort(struct wa_xfer *xfer) +static int __wa_xfer_abort(struct wa_xfer *xfer)  { -	int result; +	int result = -ENOMEM;  	struct device *dev = &xfer->wa->usb_iface->dev;  	struct wa_xfer_abort_buffer *b;  	struct wa_rpipe *rpipe = xfer->ep->hcpriv; @@ -375,7 +507,8 @@ static void __wa_xfer_abort(struct wa_xfer *xfer)  	b->cmd.bLength =  sizeof(b->cmd);  	b->cmd.bRequestType = WA_XFER_ABORT;  	b->cmd.wRPipe = rpipe->descr.wRPipeIndex; -	b->cmd.dwTransferID = wa_xfer_id(xfer); +	b->cmd.dwTransferID = wa_xfer_id_le32(xfer); +	b->wa = wa_get(xfer->wa);  	usb_init_urb(&b->urb);  	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev, @@ -385,20 +518,63 @@ static void __wa_xfer_abort(struct wa_xfer *xfer)  	result = usb_submit_urb(&b->urb, GFP_ATOMIC);  	if (result < 0)  		goto error_submit; -	return;				/* callback frees! */ +	return result;				/* callback frees! */  error_submit: +	wa_put(xfer->wa);  	if (printk_ratelimit())  		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",  			xfer, result);  	kfree(b);  error_kmalloc: -	return; +	return result;  }  /* + * Calculate the number of isoc frames starting from isoc_frame_offset + * that will fit a in transfer segment. + */ +static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer, +	int isoc_frame_offset, int *total_size) +{ +	int segment_size = 0, frame_count = 0; +	int index = isoc_frame_offset; +	struct usb_iso_packet_descriptor *iso_frame_desc = +		xfer->urb->iso_frame_desc; + +	while ((index < xfer->urb->number_of_packets) +		&& ((segment_size + iso_frame_desc[index].length) +				<= xfer->seg_size)) { +		/* +		 * For Alereon HWA devices, only include an isoc frame in an +		 * out segment if it is physically contiguous with the previous +		 * frame.  This is required because those devices expect +		 * the isoc frames to be sent as a single USB transaction as +		 * opposed to one transaction per frame with standard HWA. 
+		 */ +		if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) +			&& (xfer->is_inbound == 0) +			&& (index > isoc_frame_offset) +			&& ((iso_frame_desc[index - 1].offset + +				iso_frame_desc[index - 1].length) != +				iso_frame_desc[index].offset)) +			break; + +		/* this frame fits. count it. */ +		++frame_count; +		segment_size += iso_frame_desc[index].length; + +		/* move to the next isoc frame. */ +		++index; +	} + +	*total_size = segment_size; +	return frame_count; +} + +/*   *   * @returns < 0 on error, transfer segment request size if ok   */ @@ -422,43 +598,85 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,  		result = sizeof(struct wa_xfer_bi);  		break;  	case USB_ENDPOINT_XFER_ISOC: -		dev_err(dev, "FIXME: ISOC not implemented\n"); -		result = -ENOSYS; -		goto error; +		*pxfer_type = WA_XFER_TYPE_ISO; +		result = sizeof(struct wa_xfer_hwaiso); +		break;  	default:  		/* never happens */  		BUG();  		result = -EINVAL;	/* shut gcc up */ -	}; +	}  	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;  	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0; + +	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);  	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)  		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);  	/* Compute the segment size and make sure it is a multiple of  	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of  	 * a check (FIXME) */ -	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);  	if (xfer->seg_size < maxpktsize) { -		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize " -			"%zu\n", xfer->seg_size, maxpktsize); +		dev_err(dev, +			"HW BUG? seg_size %zu smaller than maxpktsize %zu\n", +			xfer->seg_size, maxpktsize);  		result = -EINVAL;  		goto error;  	}  	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize; -	xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size); -	if (xfer->segs >= WA_SEGS_MAX) { -		dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n", -			(int)(urb->transfer_buffer_length / xfer->seg_size), +	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) { +		int index = 0; + +		xfer->segs = 0; +		/* +		 * loop over urb->number_of_packets to determine how many +		 * xfer segments will be needed to send the isoc frames. +		 */ +		while (index < urb->number_of_packets) { +			int seg_size; /* don't care. */ +			index += __wa_seg_calculate_isoc_frame_count(xfer, +					index, &seg_size); +			++xfer->segs; +		} +	} else { +		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, +						xfer->seg_size); +		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) +			xfer->segs = 1; +	} + +	if (xfer->segs > WA_SEGS_MAX) { +		dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n", +			(urb->transfer_buffer_length/xfer->seg_size),  			WA_SEGS_MAX);  		result = -EINVAL;  		goto error;  	} -	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) -		xfer->segs = 1;  error:  	return result;  } +static void __wa_setup_isoc_packet_descr( +		struct wa_xfer_packet_info_hwaiso *packet_desc, +		struct wa_xfer *xfer, +		struct wa_seg *seg) { +	struct usb_iso_packet_descriptor *iso_frame_desc = +		xfer->urb->iso_frame_desc; +	int frame_index; + +	/* populate isoc packet descriptor. 
*/ +	packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO; +	packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) + +		(sizeof(packet_desc->PacketLength[0]) * +			seg->isoc_frame_count)); +	for (frame_index = 0; frame_index < seg->isoc_frame_count; +		++frame_index) { +		int offset_index = frame_index + seg->isoc_frame_offset; +		packet_desc->PacketLength[frame_index] = +			cpu_to_le16(iso_frame_desc[offset_index].length); +	} +} + +  /* Fill in the common request header and xfer-type specific data. */  static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,  				 struct wa_xfer_hdr *xfer_hdr0, @@ -466,12 +684,13 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,  				 size_t xfer_hdr_size)  {  	struct wa_rpipe *rpipe = xfer->ep->hcpriv; +	struct wa_seg *seg = xfer->seg[0]; -	xfer_hdr0 = &xfer->seg[0]->xfer_hdr; +	xfer_hdr0 = &seg->xfer_hdr;  	xfer_hdr0->bLength = xfer_hdr_size;  	xfer_hdr0->bRequestType = xfer_type;  	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex; -	xfer_hdr0->dwTransferID = wa_xfer_id(xfer); +	xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);  	xfer_hdr0->bTransferSegment = 0;  	switch (xfer_type) {  	case WA_XFER_TYPE_CTL: { @@ -484,8 +703,18 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,  	}  	case WA_XFER_TYPE_BI:  		break; -	case WA_XFER_TYPE_ISO: -		printk(KERN_ERR "FIXME: ISOC not implemented\n"); +	case WA_XFER_TYPE_ISO: { +		struct wa_xfer_hwaiso *xfer_iso = +			container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr); +		struct wa_xfer_packet_info_hwaiso *packet_desc = +			((void *)xfer_iso) + xfer_hdr_size; + +		/* populate the isoc section of the transfer request. */ +		xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count); +		/* populate isoc packet descriptor. */ +		__wa_setup_isoc_packet_descr(packet_desc, xfer, seg); +		break; +	}  	default:  		BUG();  	}; @@ -494,12 +723,12 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,  /*   * Callback for the OUT data phase of the segment request   * - * Check wa_seg_cb(); most comments also apply here because this + * Check wa_seg_tr_cb(); most comments also apply here because this   * function does almost the same thing and they work closely   * together.   *   * If the seg request has failed but this DTO phase has succeeded, - * wa_seg_cb() has already failed the segment and moved the + * wa_seg_tr_cb() has already failed the segment and moved the   * status to WA_SEG_ERROR, so this will go through 'case 0' and   * effectively do nothing.   */ @@ -512,6 +741,143 @@ static void wa_seg_dto_cb(struct urb *urb)  	struct wa_rpipe *rpipe;  	unsigned long flags;  	unsigned rpipe_ready = 0; +	int data_send_done = 1, release_dto = 0, holding_dto = 0; +	u8 done = 0; +	int result; + +	/* free the sg if it was used. */ +	kfree(urb->sg); +	urb->sg = NULL; + +	spin_lock_irqsave(&xfer->lock, flags); +	wa = xfer->wa; +	dev = &wa->usb_iface->dev; +	if (usb_pipeisoc(xfer->urb->pipe)) { +		/* Alereon HWA sends all isoc frames in a single transfer. */ +		if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) +			seg->isoc_frame_index += seg->isoc_frame_count; +		else +			seg->isoc_frame_index += 1; +		if (seg->isoc_frame_index < seg->isoc_frame_count) { +			data_send_done = 0; +			holding_dto = 1; /* checked in error cases. */ +			/* +			 * if this is the last isoc frame of the segment, we +			 * can release DTO after sending this frame. 
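The frame accounting being added to wa_seg_dto_cb here is subtle, so a simplified model may help. In this sketch (invented names, not driver code), a multi-frame isoc segment keeps the single shared DTO resource across per-frame resubmissions and gives it up only once the final frame has been handed off.

/* Per-segment model of the frame/DTO accounting in wa_seg_dto_cb. */
struct seg_model {
	int frame_index;	/* next frame to send */
	int frame_count;	/* frames in this segment */
	int holding_dto;	/* 1 while this segment owns the DTO token */
};

/* Called per completed frame; returns 1 when the token must be released. */
static int frame_done(struct seg_model *s)
{
	s->frame_index++;
	if (s->frame_index >= s->frame_count) {
		s->holding_dto = 0;	/* last frame: release the token */
		return 1;
	}
	s->holding_dto = 1;		/* more frames: keep DTO and resubmit */
	return 0;
}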
+			 */ +			if ((seg->isoc_frame_index + 1) >= +				seg->isoc_frame_count) +				release_dto = 1; +		} +		dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n", +			wa_xfer_id(xfer), seg->index, seg->isoc_frame_index, +			holding_dto, release_dto); +	} +	spin_unlock_irqrestore(&xfer->lock, flags); + +	switch (urb->status) { +	case 0: +		spin_lock_irqsave(&xfer->lock, flags); +		seg->result += urb->actual_length; +		if (data_send_done) { +			dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n", +				wa_xfer_id(xfer), seg->index, seg->result); +			if (seg->status < WA_SEG_PENDING) +				seg->status = WA_SEG_PENDING; +		} else { +			/* should only hit this for isoc xfers. */ +			/* +			 * Populate the dto URB with the next isoc frame buffer, +			 * send the URB and release DTO if we no longer need it. +			 */ +			 __wa_populate_dto_urb_isoc(xfer, seg, +				seg->isoc_frame_offset + seg->isoc_frame_index); + +			/* resubmit the URB with the next isoc frame. */ +			/* take a ref on resubmit. */ +			wa_xfer_get(xfer); +			result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); +			if (result < 0) { +				dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n", +				       wa_xfer_id(xfer), seg->index, result); +				spin_unlock_irqrestore(&xfer->lock, flags); +				goto error_dto_submit; +			} +		} +		spin_unlock_irqrestore(&xfer->lock, flags); +		if (release_dto) { +			__wa_dto_put(wa); +			wa_check_for_delayed_rpipes(wa); +		} +		break; +	case -ECONNRESET:	/* URB unlinked; no need to do anything */ +	case -ENOENT:		/* as it was done by the who unlinked us */ +		if (holding_dto) { +			__wa_dto_put(wa); +			wa_check_for_delayed_rpipes(wa); +		} +		break; +	default:		/* Other errors ... */ +		dev_err(dev, "xfer 0x%08X#%u: data out error %d\n", +			wa_xfer_id(xfer), seg->index, urb->status); +		goto error_default; +	} + +	/* taken when this URB was submitted. */ +	wa_xfer_put(xfer); +	return; + +error_dto_submit: +	/* taken on resubmit attempt. */ +	wa_xfer_put(xfer); +error_default: +	spin_lock_irqsave(&xfer->lock, flags); +	rpipe = xfer->ep->hcpriv; +	if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, +		    EDC_ERROR_TIMEFRAME)){ +		dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n"); +		wa_reset_all(wa); +	} +	if (seg->status != WA_SEG_ERROR) { +		seg->result = urb->status; +		__wa_xfer_abort(xfer); +		rpipe_ready = rpipe_avail_inc(rpipe); +		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR); +	} +	spin_unlock_irqrestore(&xfer->lock, flags); +	if (holding_dto) { +		__wa_dto_put(wa); +		wa_check_for_delayed_rpipes(wa); +	} +	if (done) +		wa_xfer_completion(xfer); +	if (rpipe_ready) +		wa_xfer_delayed_run(rpipe); +	/* taken when this URB was submitted. */ +	wa_xfer_put(xfer); +} + +/* + * Callback for the isoc packet descriptor phase of the segment request + * + * Check wa_seg_tr_cb(); most comments also apply here because this + * function does almost the same thing and they work closely + * together. + * + * If the seg request has failed but this phase has succeeded, + * wa_seg_tr_cb() has already failed the segment and moved the + * status to WA_SEG_ERROR, so this will go through 'case 0' and + * effectively do nothing. 
+ */ +static void wa_seg_iso_pack_desc_cb(struct urb *urb) +{ +	struct wa_seg *seg = urb->context; +	struct wa_xfer *xfer = seg->xfer; +	struct wahc *wa; +	struct device *dev; +	struct wa_rpipe *rpipe; +	unsigned long flags; +	unsigned rpipe_ready = 0;  	u8 done = 0;  	switch (urb->status) { @@ -519,11 +885,10 @@ static void wa_seg_dto_cb(struct urb *urb)  		spin_lock_irqsave(&xfer->lock, flags);  		wa = xfer->wa;  		dev = &wa->usb_iface->dev; -		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n", -			xfer, seg->index, urb->actual_length); -		if (seg->status < WA_SEG_PENDING) +		dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n", +			wa_xfer_id(xfer), seg->index); +		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)  			seg->status = WA_SEG_PENDING; -		seg->result = urb->actual_length;  		spin_unlock_irqrestore(&xfer->lock, flags);  		break;  	case -ECONNRESET:	/* URB unlinked; no need to do anything */ @@ -534,21 +899,20 @@ static void wa_seg_dto_cb(struct urb *urb)  		wa = xfer->wa;  		dev = &wa->usb_iface->dev;  		rpipe = xfer->ep->hcpriv; -		dev_dbg(dev, "xfer %p#%u: data out error %d\n", -			xfer, seg->index, urb->status); +		pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n", +				wa_xfer_id(xfer), seg->index, urb->status);  		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,  			    EDC_ERROR_TIMEFRAME)){ -			dev_err(dev, "DTO: URB max acceptable errors " -				"exceeded, resetting device\n"); +			dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");  			wa_reset_all(wa);  		}  		if (seg->status != WA_SEG_ERROR) { -			seg->status = WA_SEG_ERROR; +			usb_unlink_urb(seg->dto_urb);  			seg->result = urb->status; -			xfer->segs_done++;  			__wa_xfer_abort(xfer);  			rpipe_ready = rpipe_avail_inc(rpipe); -			done = __wa_xfer_is_done(xfer); +			done = __wa_xfer_mark_seg_as_done(xfer, seg, +					WA_SEG_ERROR);  		}  		spin_unlock_irqrestore(&xfer->lock, flags);  		if (done) @@ -556,6 +920,8 @@ static void wa_seg_dto_cb(struct urb *urb)  		if (rpipe_ready)  			wa_xfer_delayed_run(rpipe);  	} +	/* taken when this URB was submitted. */ +	wa_xfer_put(xfer);  }  /* @@ -572,11 +938,11 @@ static void wa_seg_dto_cb(struct urb *urb)   * We have to check before setting the status to WA_SEG_PENDING   * because sometimes the xfer result callback arrives before this   * callback (geeeeeeze), so it might happen that we are already in - * another state. As well, we don't set it if the transfer is inbound, + * another state. As well, we don't set it if the transfer is not inbound,   * as in that case, wa_seg_dto_cb will do it when the OUT data phase   * finishes.   
*/ -static void wa_seg_cb(struct urb *urb) +static void wa_seg_tr_cb(struct urb *urb)  {  	struct wa_seg *seg = urb->context;  	struct wa_xfer *xfer = seg->xfer; @@ -592,8 +958,11 @@ static void wa_seg_cb(struct urb *urb)  		spin_lock_irqsave(&xfer->lock, flags);  		wa = xfer->wa;  		dev = &wa->usb_iface->dev; -		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index); -		if (xfer->is_inbound && seg->status < WA_SEG_PENDING) +		dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n", +			xfer, wa_xfer_id(xfer), seg->index); +		if (xfer->is_inbound && +			seg->status < WA_SEG_PENDING && +			!(usb_pipeisoc(xfer->urb->pipe)))  			seg->status = WA_SEG_PENDING;  		spin_unlock_irqrestore(&xfer->lock, flags);  		break; @@ -606,35 +975,39 @@ static void wa_seg_cb(struct urb *urb)  		dev = &wa->usb_iface->dev;  		rpipe = xfer->ep->hcpriv;  		if (printk_ratelimit()) -			dev_err(dev, "xfer %p#%u: request error %d\n", -				xfer, seg->index, urb->status); +			dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n", +				xfer, wa_xfer_id(xfer), seg->index, +				urb->status);  		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,  			    EDC_ERROR_TIMEFRAME)){  			dev_err(dev, "DTO: URB max acceptable errors "  				"exceeded, resetting device\n");  			wa_reset_all(wa);  		} +		usb_unlink_urb(seg->isoc_pack_desc_urb);  		usb_unlink_urb(seg->dto_urb); -		seg->status = WA_SEG_ERROR;  		seg->result = urb->status; -		xfer->segs_done++;  		__wa_xfer_abort(xfer);  		rpipe_ready = rpipe_avail_inc(rpipe); -		done = __wa_xfer_is_done(xfer); +		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);  		spin_unlock_irqrestore(&xfer->lock, flags);  		if (done)  			wa_xfer_completion(xfer);  		if (rpipe_ready)  			wa_xfer_delayed_run(rpipe);  	} +	/* taken when this URB was submitted. */ +	wa_xfer_put(xfer);  } -/* allocate an SG list to store bytes_to_transfer bytes and copy the +/* + * Allocate an SG list to store bytes_to_transfer bytes and copy the   * subset of the in_sg that matches the buffer subset - * we are about to transfer. */ + * we are about to transfer. + */  static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,  	const unsigned int bytes_transferred, -	const unsigned int bytes_to_transfer, unsigned int *out_num_sgs) +	const unsigned int bytes_to_transfer, int *out_num_sgs)  {  	struct scatterlist *out_sg;  	unsigned int bytes_processed = 0, offset_into_current_page_data = 0, @@ -710,6 +1083,75 @@ static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,  }  /* + * Populate DMA buffer info for the isoc dto urb. + */ +static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer, +	struct wa_seg *seg, int curr_iso_frame) +{ +	seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; +	seg->dto_urb->sg = NULL; +	seg->dto_urb->num_sgs = 0; +	/* dto urb buffer address pulled from iso_frame_desc. */ +	seg->dto_urb->transfer_dma = xfer->urb->transfer_dma + +		xfer->urb->iso_frame_desc[curr_iso_frame].offset; +	/* The Alereon HWA sends a single URB with all isoc segs. */ +	if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) +		seg->dto_urb->transfer_buffer_length = seg->isoc_size; +	else +		seg->dto_urb->transfer_buffer_length = +			xfer->urb->iso_frame_desc[curr_iso_frame].length; +} + +/* + * Populate buffer ptr and size, DMA buffer or SG list for the dto urb. 
+ */ +static int __wa_populate_dto_urb(struct wa_xfer *xfer, +	struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size) +{ +	int result = 0; + +	if (xfer->is_dma) { +		seg->dto_urb->transfer_dma = +			xfer->urb->transfer_dma + buf_itr_offset; +		seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; +		seg->dto_urb->sg = NULL; +		seg->dto_urb->num_sgs = 0; +	} else { +		/* do buffer or SG processing. */ +		seg->dto_urb->transfer_flags &= +			~URB_NO_TRANSFER_DMA_MAP; +		/* this should always be 0 before a resubmit. */ +		seg->dto_urb->num_mapped_sgs = 0; + +		if (xfer->urb->transfer_buffer) { +			seg->dto_urb->transfer_buffer = +				xfer->urb->transfer_buffer + +				buf_itr_offset; +			seg->dto_urb->sg = NULL; +			seg->dto_urb->num_sgs = 0; +		} else { +			seg->dto_urb->transfer_buffer = NULL; + +			/* +			 * allocate an SG list to store seg_size bytes +			 * and copy the subset of the xfer->urb->sg that +			 * matches the buffer subset we are about to +			 * read. +			 */ +			seg->dto_urb->sg = wa_xfer_create_subset_sg( +				xfer->urb->sg, +				buf_itr_offset, buf_itr_size, +				&(seg->dto_urb->num_sgs)); +			if (!(seg->dto_urb->sg)) +				result = -ENOMEM; +		} +	} +	seg->dto_urb->transfer_buffer_length = buf_itr_size; + +	return result; +} + +/*   * Allocate the segs array and initialize each of them   *   * The segments are freed by wa_xfer_destroy() when the xfer use count @@ -719,7 +1161,7 @@ static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,   */  static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)  { -	int result, cnt; +	int result, cnt, isoc_frame_offset = 0;  	size_t alloc_size = sizeof(*xfer->seg[0])  		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;  	struct usb_device *usb_dev = xfer->wa->usb_dev; @@ -734,18 +1176,63 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)  	buf_itr = 0;  	buf_size = xfer->urb->transfer_buffer_length;  	for (cnt = 0; cnt < xfer->segs; cnt++) { -		seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC); +		size_t iso_pkt_descr_size = 0; +		int seg_isoc_frame_count = 0, seg_isoc_size = 0; + +		/* +		 * Adjust the size of the segment object to contain space for +		 * the isoc packet descriptor buffer. +		 */ +		if (usb_pipeisoc(xfer->urb->pipe)) { +			seg_isoc_frame_count = +				__wa_seg_calculate_isoc_frame_count(xfer, +					isoc_frame_offset, &seg_isoc_size); + +			iso_pkt_descr_size = +				sizeof(struct wa_xfer_packet_info_hwaiso) + +				(seg_isoc_frame_count * sizeof(__le16)); +		} +		seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size, +						GFP_ATOMIC);  		if (seg == NULL)  			goto error_seg_kmalloc;  		wa_seg_init(seg);  		seg->xfer = xfer;  		seg->index = cnt; -		usb_fill_bulk_urb(&seg->urb, usb_dev, +		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,  				  usb_sndbulkpipe(usb_dev,  						  dto_epd->bEndpointAddress),  				  &seg->xfer_hdr, xfer_hdr_size, -				  wa_seg_cb, seg); +				  wa_seg_tr_cb, seg);  		buf_itr_size = min(buf_size, xfer->seg_size); + +		if (usb_pipeisoc(xfer->urb->pipe)) { +			seg->isoc_frame_count = seg_isoc_frame_count; +			seg->isoc_frame_offset = isoc_frame_offset; +			seg->isoc_size = seg_isoc_size; +			/* iso packet descriptor. */ +			seg->isoc_pack_desc_urb = +					usb_alloc_urb(0, GFP_ATOMIC); +			if (seg->isoc_pack_desc_urb == NULL) +				goto error_iso_pack_desc_alloc; +			/* +			 * The buffer for the isoc packet descriptor starts +			 * after the transfer request header in the +			 * segment object memory buffer. 
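The allocation trick referred to in this comment, one kmalloc holding the fixed transfer header followed by the variable-length isoc packet descriptor, can be shown in isolation. This is a hedged sketch: calloc stands in for kmalloc, and iso_descr_hdr is a simplified stand-in for struct wa_xfer_packet_info_hwaiso, whose real fields are little-endian.

#include <stdint.h>
#include <stdlib.h>

struct iso_descr_hdr {
	uint8_t  bPacketType;
	uint16_t wLength;
	uint16_t PacketLength[];	/* one entry per isoc frame */
};

/* One block: header first, descriptor carved from the trailing bytes. */
static void *alloc_seg(size_t hdr_size, int frame_count,
		       struct iso_descr_hdr **descr)
{
	size_t descr_size = sizeof(**descr) +
		frame_count * sizeof((*descr)->PacketLength[0]);
	char *buf = calloc(1, hdr_size + descr_size);

	if (buf == NULL)
		return NULL;
	*descr = (struct iso_descr_hdr *)(buf + hdr_size);
	return buf;		/* freed with a single free(), like kfree */
}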
+			 */
+			usb_fill_bulk_urb(
+				seg->isoc_pack_desc_urb, usb_dev,
+				usb_sndbulkpipe(usb_dev,
+					dto_epd->bEndpointAddress),
+				(void *)(&seg->xfer_hdr) +
+					xfer_hdr_size,
+				iso_pkt_descr_size,
+				wa_seg_iso_pack_desc_cb, seg);
+
+			/* adjust starting frame offset for next seg. */
+			isoc_frame_offset += seg_isoc_frame_count;
+		}
+
 		if (xfer->is_inbound == 0 && buf_size > 0) {
 			/* outbound data. */
 			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -756,69 +1243,44 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 				usb_sndbulkpipe(usb_dev,
 						dto_epd->bEndpointAddress),
 				NULL, 0, wa_seg_dto_cb, seg);
-			if (xfer->is_dma) {
-				seg->dto_urb->transfer_dma =
-					xfer->urb->transfer_dma + buf_itr;
-				seg->dto_urb->transfer_flags |=
-					URB_NO_TRANSFER_DMA_MAP;
-				seg->dto_urb->transfer_buffer = NULL;
-				seg->dto_urb->sg = NULL;
-				seg->dto_urb->num_sgs = 0;
+
+			if (usb_pipeisoc(xfer->urb->pipe)) {
+				/*
+				 * Fill in the xfer buffer information for the
+				 * first isoc frame.  Subsequent frames in this
+				 * segment will be filled in and sent from the
+				 * DTO completion routine, if needed.
+				 */
+				__wa_populate_dto_urb_isoc(xfer, seg,
+					seg->isoc_frame_offset);
 			} else {
-				/* do buffer or SG processing. */
-				seg->dto_urb->transfer_flags &=
-					~URB_NO_TRANSFER_DMA_MAP;
-				/* this should always be 0 before a resubmit. */
-				seg->dto_urb->num_mapped_sgs = 0;
-
-				if (xfer->urb->transfer_buffer) {
-					seg->dto_urb->transfer_buffer =
-						xfer->urb->transfer_buffer +
-						buf_itr;
-					seg->dto_urb->sg = NULL;
-					seg->dto_urb->num_sgs = 0;
-				} else {
-					/* allocate an SG list to store seg_size
-					    bytes and copy the subset of the
-					    xfer->urb->sg that matches the
-					    buffer subset we are about to read.
-					*/
-					seg->dto_urb->sg =
-						wa_xfer_create_subset_sg(
-						xfer->urb->sg,
-						buf_itr, buf_itr_size,
-						&(seg->dto_urb->num_sgs));
-
-					if (!(seg->dto_urb->sg)) {
-						seg->dto_urb->num_sgs	= 0;
-						goto error_sg_alloc;
-					}
-
-					seg->dto_urb->transfer_buffer = NULL;
-				}
+				/* fill in the xfer buffer information. */
+				result = __wa_populate_dto_urb(xfer, seg,
+							buf_itr, buf_itr_size);
+				if (result < 0)
+					goto error_seg_outbound_populate;
+
+				buf_itr += buf_itr_size;
+				buf_size -= buf_itr_size;
 			}
-			seg->dto_urb->transfer_buffer_length = buf_itr_size;
 		}
 		seg->status = WA_SEG_READY;
-		buf_itr += buf_itr_size;
-		buf_size -= buf_itr_size;
 	}
 	return 0;
 
-error_sg_alloc:
+	/*
+	 * Free the memory for the current segment which failed to init.
+	 * Use the fact that cnt is left where it failed.  The remaining
+	 * segments will be cleaned up by wa_xfer_destroy.
+	 */ +error_seg_outbound_populate:  	usb_free_urb(xfer->seg[cnt]->dto_urb);  error_dto_alloc: +	usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb); +error_iso_pack_desc_alloc:  	kfree(xfer->seg[cnt]); -	cnt--; +	xfer->seg[cnt] = NULL;  error_seg_kmalloc: -	/* use the fact that cnt is left at were it failed */ -	for (; cnt >= 0; cnt--) { -		if (xfer->seg[cnt] && xfer->is_inbound == 0) { -			usb_free_urb(xfer->seg[cnt]->dto_urb); -			kfree(xfer->seg[cnt]->dto_urb->sg); -		} -		kfree(xfer->seg[cnt]); -	}  error_segs_kzalloc:  	return result;  } @@ -856,21 +1318,50 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)  	wa_xfer_id_init(xfer);  	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); -	/* Fill remainig headers */ +	/* Fill remaining headers */  	xfer_hdr = xfer_hdr0; -	transfer_size = urb->transfer_buffer_length; -	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? -		xfer->seg_size : transfer_size; -	transfer_size -=  xfer->seg_size; -	for (cnt = 1; cnt < xfer->segs; cnt++) { -		xfer_hdr = &xfer->seg[cnt]->xfer_hdr; -		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); -		xfer_hdr->bTransferSegment = cnt; -		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ? -			cpu_to_le32(xfer->seg_size) -			: cpu_to_le32(transfer_size); -		xfer->seg[cnt]->status = WA_SEG_READY; +	if (xfer_type == WA_XFER_TYPE_ISO) { +		xfer_hdr0->dwTransferLength = +			cpu_to_le32(xfer->seg[0]->isoc_size); +		for (cnt = 1; cnt < xfer->segs; cnt++) { +			struct wa_xfer_packet_info_hwaiso *packet_desc; +			struct wa_seg *seg = xfer->seg[cnt]; +			struct wa_xfer_hwaiso *xfer_iso; + +			xfer_hdr = &seg->xfer_hdr; +			xfer_iso = container_of(xfer_hdr, +						struct wa_xfer_hwaiso, hdr); +			packet_desc = ((void *)xfer_hdr) + xfer_hdr_size; +			/* +			 * Copy values from the 0th header. Segment specific +			 * values are set below. +			 */ +			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); +			xfer_hdr->bTransferSegment = cnt; +			xfer_hdr->dwTransferLength = +				cpu_to_le32(seg->isoc_size); +			xfer_iso->dwNumOfPackets = +					cpu_to_le32(seg->isoc_frame_count); +			__wa_setup_isoc_packet_descr(packet_desc, xfer, seg); +			seg->status = WA_SEG_READY; +		} +	} else { +		transfer_size = urb->transfer_buffer_length; +		xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? +			cpu_to_le32(xfer->seg_size) : +			cpu_to_le32(transfer_size);  		transfer_size -=  xfer->seg_size; +		for (cnt = 1; cnt < xfer->segs; cnt++) { +			xfer_hdr = &xfer->seg[cnt]->xfer_hdr; +			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); +			xfer_hdr->bTransferSegment = cnt; +			xfer_hdr->dwTransferLength = +				transfer_size > xfer->seg_size ? +					cpu_to_le32(xfer->seg_size) +					: cpu_to_le32(transfer_size); +			xfer->seg[cnt]->status = WA_SEG_READY; +			transfer_size -=  xfer->seg_size; +		}  	}  	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */  	result = 0; @@ -885,70 +1376,161 @@ error_setup_sizes:   * rpipe->seg_lock is held!   */  static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer, -			   struct wa_seg *seg) +			   struct wa_seg *seg, int *dto_done)  {  	int result; -	result = usb_submit_urb(&seg->urb, GFP_ATOMIC); + +	/* default to done unless we encounter a multi-frame isoc segment. */ +	*dto_done = 1; + +	/* +	 * Take a ref for each segment urb so the xfer cannot disappear until +	 * all of the callbacks run. +	 */ +	wa_xfer_get(xfer); +	/* submit the transfer request. 
*/ +	seg->status = WA_SEG_SUBMITTED; +	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);  	if (result < 0) { -		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n", -		       xfer, seg->index, result); -		goto error_seg_submit; +		pr_err("%s: xfer %p#%u: REQ submit failed: %d\n", +		       __func__, xfer, seg->index, result); +		wa_xfer_put(xfer); +		goto error_tr_submit;  	} +	/* submit the isoc packet descriptor if present. */ +	if (seg->isoc_pack_desc_urb) { +		wa_xfer_get(xfer); +		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC); +		seg->isoc_frame_index = 0; +		if (result < 0) { +			pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n", +			       __func__, xfer, seg->index, result); +			wa_xfer_put(xfer); +			goto error_iso_pack_desc_submit; +		} +	} +	/* submit the out data if this is an out request. */  	if (seg->dto_urb) { +		struct wahc *wa = xfer->wa; +		wa_xfer_get(xfer);  		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);  		if (result < 0) { -			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n", -			       xfer, seg->index, result); +			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n", +			       __func__, xfer, seg->index, result); +			wa_xfer_put(xfer);  			goto error_dto_submit;  		} +		/* +		 * If this segment contains more than one isoc frame, hold +		 * onto the dto resource until we send all frames. +		 * Only applies to non-Alereon devices. +		 */ +		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0) +			&& (seg->isoc_frame_count > 1)) +			*dto_done = 0;  	} -	seg->status = WA_SEG_SUBMITTED;  	rpipe_avail_dec(rpipe);  	return 0;  error_dto_submit: -	usb_unlink_urb(&seg->urb); -error_seg_submit: +	usb_unlink_urb(seg->isoc_pack_desc_urb); +error_iso_pack_desc_submit: +	usb_unlink_urb(&seg->tr_urb); +error_tr_submit:  	seg->status = WA_SEG_ERROR;  	seg->result = result; +	*dto_done = 1;  	return result;  }  /* - * Execute more queued request segments until the maximum concurrent allowed + * Execute more queued request segments until the maximum concurrent allowed. + * Return true if the DTO resource was acquired and released.   *   * The ugly unlock/lock sequence on the error path is needed as the   * xfer->lock normally nests the seg_lock and not viceversa. - *   */ -static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) +static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)  { -	int result; +	int result, dto_acquired = 0, dto_done = 0;  	struct device *dev = &rpipe->wa->usb_iface->dev;  	struct wa_seg *seg;  	struct wa_xfer *xfer;  	unsigned long flags; +	*dto_waiting = 0; +  	spin_lock_irqsave(&rpipe->seg_lock, flags);  	while (atomic_read(&rpipe->segs_available) > 0 -	      && !list_empty(&rpipe->seg_list)) { +	      && !list_empty(&rpipe->seg_list) +	      && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {  		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,  				 list_node);  		list_del(&seg->list_node);  		xfer = seg->xfer; -		result = __wa_seg_submit(rpipe, xfer, seg); -		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n", -			xfer, seg->index, atomic_read(&rpipe->segs_available), result); +		/* +		 * Get a reference to the xfer in case the callbacks for the +		 * URBs submitted by __wa_seg_submit attempt to complete +		 * the xfer before this function completes. +		 */ +		wa_xfer_get(xfer); +		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done); +		/* release the dto resource if this RPIPE is done with it. 
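Stripped of locking and USB details, the drain loop this comment belongs to pairs two resources: per-RPIPE segment credits and the single shared DTO token. A runnable toy model, with invented names and a plain flag standing in for the token, looks like this:

#include <stdio.h>

static int dto_free = 1;	/* the one shared DTO token */

static int try_get_dto(void)
{
	int got = dto_free;

	dto_free = 0;
	return got;
}

static void put_dto(void)
{
	dto_free = 1;
}

struct rpipe_model {
	int credits;	/* segments the RPIPE can still accept */
	int queued;	/* delayed segments waiting to be submitted */
};

/* Drain the queue while both a credit and the DTO token are available. */
static void delayed_run(struct rpipe_model *rp)
{
	while (rp->credits > 0 && rp->queued > 0 && try_get_dto()) {
		rp->queued--;
		rp->credits--;	/* one segment submitted */
		put_dto();	/* single-frame segment: token comes right back */
	}
}

int main(void)
{
	struct rpipe_model rp = { .credits = 2, .queued = 5 };

	delayed_run(&rp);
	printf("queued after drain: %d\n", rp.queued);	/* prints 3 */
	return 0;
}

In the driver the token is only returned immediately when dto_done reports that the segment needed a single DTO pass; multi-frame isoc segments keep it, which is why the real loop can exit with work still queued.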
*/ +		if (dto_done) +			__wa_dto_put(rpipe->wa); +		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n", +			xfer, wa_xfer_id(xfer), seg->index, +			atomic_read(&rpipe->segs_available), result);  		if (unlikely(result < 0)) { +			int done; +  			spin_unlock_irqrestore(&rpipe->seg_lock, flags);  			spin_lock_irqsave(&xfer->lock, flags);  			__wa_xfer_abort(xfer); +			/* +			 * This seg was marked as submitted when it was put on +			 * the RPIPE seg_list.  Mark it done. +			 */  			xfer->segs_done++; +			done = __wa_xfer_is_done(xfer);  			spin_unlock_irqrestore(&xfer->lock, flags); +			if (done) +				wa_xfer_completion(xfer);  			spin_lock_irqsave(&rpipe->seg_lock, flags);  		} +		wa_xfer_put(xfer);  	} +	/* +	 * Mark this RPIPE as waiting if dto was not acquired, there are +	 * delayed segs and no active transfers to wake us up later. +	 */ +	if (!dto_acquired && !list_empty(&rpipe->seg_list) +		&& (atomic_read(&rpipe->segs_available) == +			le16_to_cpu(rpipe->descr.wRequests))) +		*dto_waiting = 1; +  	spin_unlock_irqrestore(&rpipe->seg_lock, flags); + +	return dto_done; +} + +static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) +{ +	int dto_waiting; +	int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting); + +	/* +	 * If this RPIPE is waiting on the DTO resource, add it to the tail of +	 * the waiting list. +	 * Otherwise, if the WA DTO resource was acquired and released by +	 *  __wa_xfer_delayed_run, another RPIPE may have attempted to acquire +	 * DTO and failed during that time.  Check the delayed list and process +	 * any waiters.  Start searching from the next RPIPE index. +	 */ +	if (dto_waiting) +		wa_add_delayed_rpipe(rpipe->wa, rpipe); +	else if (dto_done) +		wa_check_for_delayed_rpipes(rpipe->wa);  }  /* @@ -960,7 +1542,7 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)   */  static int __wa_xfer_submit(struct wa_xfer *xfer)  { -	int result; +	int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;  	struct wahc *wa = xfer->wa;  	struct device *dev = &wa->usb_iface->dev;  	unsigned cnt; @@ -979,27 +1561,58 @@ static int __wa_xfer_submit(struct wa_xfer *xfer)  	result = 0;  	spin_lock_irqsave(&rpipe->seg_lock, flags);  	for (cnt = 0; cnt < xfer->segs; cnt++) { +		int delay_seg = 1; +  		available = atomic_read(&rpipe->segs_available);  		empty = list_empty(&rpipe->seg_list);  		seg = xfer->seg[cnt]; -		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n", -			xfer, cnt, available, empty, -			available == 0 || !empty ? "delayed" : "submitted"); -		if (available == 0 || !empty) { -			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt); +		if (available && empty) { +			/* +			 * Only attempt to acquire DTO if we have a segment +			 * to send. 
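The must-I-wait decision that follows this block appears twice in the patch, here and at the end of __wa_xfer_delayed_run. Pulled out as a standalone predicate with descriptive parameter names (assumed for illustration, not the driver's):

/*
 * Park the RPIPE on the DTO waiters list only when nothing in flight
 * will wake it later: the token was missed, segments are queued, and
 * every credit is free, i.e. there is no submitted segment whose
 * completion would re-run this RPIPE anyway.
 */
static int must_wait_for_dto(int got_token, int queue_empty,
			     int credits_available, int credits_total)
{
	return !got_token && !queue_empty &&
		(credits_available == credits_total);
}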
+			 */ +			dto_acquired = __wa_dto_try_get(rpipe->wa); +			if (dto_acquired) { +				delay_seg = 0; +				result = __wa_seg_submit(rpipe, xfer, seg, +							&dto_done); +				dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n", +					xfer, wa_xfer_id(xfer), cnt, available, +					empty); +				if (dto_done) +					__wa_dto_put(rpipe->wa); + +				if (result < 0) { +					__wa_xfer_abort(xfer); +					goto error_seg_submit; +				} +			} +		} + +		if (delay_seg) { +			dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n", +				xfer, wa_xfer_id(xfer), cnt, available,  empty);  			seg->status = WA_SEG_DELAYED;  			list_add_tail(&seg->list_node, &rpipe->seg_list); -		} else { -			result = __wa_seg_submit(rpipe, xfer, seg); -			if (result < 0) { -				__wa_xfer_abort(xfer); -				goto error_seg_submit; -			}  		}  		xfer->segs_submitted++;  	}  error_seg_submit: +	/* +	 * Mark this RPIPE as waiting if dto was not acquired, there are +	 * delayed segs and no active transfers to wake us up later. +	 */ +	if (!dto_acquired && !list_empty(&rpipe->seg_list) +		&& (atomic_read(&rpipe->segs_available) == +			le16_to_cpu(rpipe->descr.wRequests))) +		dto_waiting = 1;  	spin_unlock_irqrestore(&rpipe->seg_lock, flags); + +	if (dto_waiting) +		wa_add_delayed_rpipe(rpipe->wa, rpipe); +	else if (dto_done) +		wa_check_for_delayed_rpipes(rpipe->wa); +  	return result;  } @@ -1025,7 +1638,7 @@ error_seg_submit:   * result never kicks in, the xfer will timeout from the USB code and   * dequeue() will be called.   */ -static void wa_urb_enqueue_b(struct wa_xfer *xfer) +static int wa_urb_enqueue_b(struct wa_xfer *xfer)  {  	int result;  	unsigned long flags; @@ -1036,18 +1649,23 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)  	unsigned done;  	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); -	if (result < 0) +	if (result < 0) { +		pr_err("%s: error_rpipe_get\n", __func__);  		goto error_rpipe_get; +	}  	result = -ENODEV;  	/* FIXME: segmentation broken -- kills DWA */  	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */  	if (urb->dev == NULL) {  		mutex_unlock(&wusbhc->mutex); +		pr_err("%s: error usb dev gone\n", __func__);  		goto error_dev_gone;  	}  	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);  	if (wusb_dev == NULL) {  		mutex_unlock(&wusbhc->mutex); +		dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n", +			__func__);  		goto error_dev_gone;  	}  	mutex_unlock(&wusbhc->mutex); @@ -1055,21 +1673,35 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)  	spin_lock_irqsave(&xfer->lock, flags);  	xfer->wusb_dev = wusb_dev;  	result = urb->status; -	if (urb->status != -EINPROGRESS) +	if (urb->status != -EINPROGRESS) { +		dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);  		goto error_dequeued; +	}  	result = __wa_xfer_setup(xfer, urb); -	if (result < 0) +	if (result < 0) { +		dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);  		goto error_xfer_setup; +	} +	/* +	 * Get a xfer reference since __wa_xfer_submit starts asynchronous +	 * operations that may try to complete the xfer before this function +	 * exits. 
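The extra reference taken here follows the usual get/put discipline. A minimal stand-alone model of why a caller must pin an object across an asynchronous submit, written with C11 atomics rather than the kernel's reference counters, is:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);	/* last reference dropped */
}

/*
 * submit() may complete on another thread and drop the submission's
 * reference before it returns, so the caller holds its own.
 */
static void guarded_submit(struct obj *o, void (*submit)(struct obj *))
{
	obj_get(o);	/* pin across the async window */
	submit(o);
	obj_put(o);	/* safe: we still held our own reference */
}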
+	 */ +	wa_xfer_get(xfer);  	result = __wa_xfer_submit(xfer); -	if (result < 0) +	if (result < 0) { +		dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);  		goto error_xfer_submit; +	}  	spin_unlock_irqrestore(&xfer->lock, flags); -	return; +	wa_xfer_put(xfer); +	return 0; -	/* this is basically wa_xfer_completion() broken up wa_xfer_giveback() -	 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean -	 * upundo setup(). +	/* +	 * this is basically wa_xfer_completion() broken up wa_xfer_giveback() +	 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo +	 * setup().  	 */  error_xfer_setup:  error_dequeued: @@ -1081,8 +1713,7 @@ error_dev_gone:  	rpipe_put(xfer->ep->hcpriv);  error_rpipe_get:  	xfer->result = result; -	wa_xfer_giveback(xfer); -	return; +	return result;  error_xfer_submit:  	done = __wa_xfer_is_done(xfer); @@ -1090,6 +1721,9 @@ error_xfer_submit:  	spin_unlock_irqrestore(&xfer->lock, flags);  	if (done)  		wa_xfer_completion(xfer); +	wa_xfer_put(xfer); +	/* return success since the completion routine will run. */ +	return 0;  }  /* @@ -1123,7 +1757,8 @@ void wa_urb_enqueue_run(struct work_struct *ws)  		list_del_init(&xfer->list_node);  		urb = xfer->urb; -		wa_urb_enqueue_b(xfer); +		if (wa_urb_enqueue_b(xfer) < 0) +			wa_xfer_giveback(xfer);  		usb_put_urb(urb);	/* taken when queuing */  	}  } @@ -1201,6 +1836,12 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,  		dump_stack();  	} +	spin_lock_irqsave(&wa->xfer_list_lock, my_flags); +	result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb); +	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); +	if (result < 0) +		goto error_link_urb; +  	result = -ENOMEM;  	xfer = kzalloc(sizeof(*xfer), gfp);  	if (xfer == NULL) @@ -1229,13 +1870,32 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,  		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);  		queue_work(wusbd, &wa->xfer_enqueue_work);  	} else { -		wa_urb_enqueue_b(xfer); +		result = wa_urb_enqueue_b(xfer); +		if (result < 0) { +			/* +			 * URB submit/enqueue failed.  Clean up, return an +			 * error and do not run the callback.  This avoids +			 * an infinite submit/complete loop. +			 */ +			dev_err(dev, "%s: URB enqueue failed: %d\n", +			   __func__, result); +			wa_put(xfer->wa); +			wa_xfer_put(xfer); +			spin_lock_irqsave(&wa->xfer_list_lock, my_flags); +			usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); +			spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); +			return result; +		}  	}  	return 0;  error_dequeued:  	kfree(xfer);  error_kmalloc: +	spin_lock_irqsave(&wa->xfer_list_lock, my_flags); +	usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); +	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); +error_link_urb:  	return result;  }  EXPORT_SYMBOL_GPL(wa_urb_enqueue); @@ -1258,31 +1918,51 @@ EXPORT_SYMBOL_GPL(wa_urb_enqueue);   * asynch request] and then make sure we cancel each segment.   *   */ -int wa_urb_dequeue(struct wahc *wa, struct urb *urb) +int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)  {  	unsigned long flags, flags2;  	struct wa_xfer *xfer;  	struct wa_seg *seg;  	struct wa_rpipe *rpipe; -	unsigned cnt; +	unsigned cnt, done = 0, xfer_abort_pending;  	unsigned rpipe_ready = 0; +	int result; -	xfer = urb->hcpriv; -	if (xfer == NULL) { +	/* check if it is safe to unlink. 
*/ +	spin_lock_irqsave(&wa->xfer_list_lock, flags); +	result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status); +	if ((result == 0) && urb->hcpriv) {  		/* -		 * Nothing setup yet enqueue will see urb->status != -		 * -EINPROGRESS (by hcd layer) and bail out with -		 * error, no need to do completion +		 * Get a xfer ref to prevent a race with wa_xfer_giveback +		 * cleaning up the xfer while we are working with it.  		 */ -		BUG_ON(urb->status == -EINPROGRESS); -		goto out; +		wa_xfer_get(urb->hcpriv);  	} +	spin_unlock_irqrestore(&wa->xfer_list_lock, flags); +	if (result) +		return result; + +	xfer = urb->hcpriv; +	if (xfer == NULL) +		return -ENOENT;  	spin_lock_irqsave(&xfer->lock, flags); +	pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));  	rpipe = xfer->ep->hcpriv;  	if (rpipe == NULL) { -		pr_debug("%s: xfer id 0x%08X has no RPIPE.  %s", -			__func__, wa_xfer_id(xfer), +		pr_debug("%s: xfer %p id 0x%08X has no RPIPE.  %s", +			__func__, xfer, wa_xfer_id(xfer),  			"Probably already aborted.\n" ); +		result = -ENOENT; +		goto out_unlock; +	} +	/* +	 * Check for done to avoid racing with wa_xfer_giveback and completing +	 * twice. +	 */ +	if (__wa_xfer_is_done(xfer)) { +		pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__, +			xfer, wa_xfer_id(xfer)); +		result = -ENOENT;  		goto out_unlock;  	}  	/* Check the delayed list -> if there, release and complete */ @@ -1293,9 +1973,16 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)  	if (xfer->seg == NULL)  	/* still hasn't reached */  		goto out_unlock;	/* setup(), enqueue_b() completes */  	/* Ok, the xfer is in flight already, it's been setup and submitted.*/ -	__wa_xfer_abort(xfer); +	xfer_abort_pending = __wa_xfer_abort(xfer) >= 0; +	/* +	 * grab the rpipe->seg_lock here to prevent racing with +	 * __wa_xfer_delayed_run. +	 */ +	spin_lock(&rpipe->seg_lock);  	for (cnt = 0; cnt < xfer->segs; cnt++) {  		seg = xfer->seg[cnt]; +		pr_debug("%s: xfer id 0x%08X#%d status = %d\n", +			__func__, wa_xfer_id(xfer), cnt, seg->status);  		switch (seg->status) {  		case WA_SEG_NOTREADY:  		case WA_SEG_READY: @@ -1304,50 +1991,68 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)  			WARN_ON(1);  			break;  		case WA_SEG_DELAYED: +			/* +			 * delete from rpipe delayed list.  If no segments on +			 * this xfer have been submitted, __wa_xfer_is_done will +			 * trigger a giveback below.  Otherwise, the submitted +			 * segments will be completed in the DTI interrupt. +			 */  			seg->status = WA_SEG_ABORTED; -			spin_lock_irqsave(&rpipe->seg_lock, flags2); +			seg->result = -ENOENT;  			list_del(&seg->list_node);  			xfer->segs_done++; -			rpipe_ready = rpipe_avail_inc(rpipe); -			spin_unlock_irqrestore(&rpipe->seg_lock, flags2); -			break; -		case WA_SEG_SUBMITTED: -			seg->status = WA_SEG_ABORTED; -			usb_unlink_urb(&seg->urb); -			if (xfer->is_inbound == 0) -				usb_unlink_urb(seg->dto_urb); -			xfer->segs_done++; -			rpipe_ready = rpipe_avail_inc(rpipe); -			break; -		case WA_SEG_PENDING: -			seg->status = WA_SEG_ABORTED; -			xfer->segs_done++; -			rpipe_ready = rpipe_avail_inc(rpipe); -			break; -		case WA_SEG_DTI_PENDING: -			usb_unlink_urb(wa->dti_urb); -			seg->status = WA_SEG_ABORTED; -			xfer->segs_done++; -			rpipe_ready = rpipe_avail_inc(rpipe);  			break;  		case WA_SEG_DONE:  		case WA_SEG_ERROR:  		case WA_SEG_ABORTED:  			break; +			/* +			 * The buf_in data for a segment in the +			 * WA_SEG_DTI_PENDING state is actively being read. 
+			 * Let wa_buf_in_cb handle it since it will be called +			 * and will increment xfer->segs_done.  Cleaning up +			 * here could cause wa_buf_in_cb to access the xfer +			 * after it has been completed/freed. +			 */ +		case WA_SEG_DTI_PENDING: +			break; +			/* +			 * In the states below, the HWA device already knows +			 * about the transfer.  If an abort request was sent, +			 * allow the HWA to process it and wait for the +			 * results.  Otherwise, the DTI state and seg completed +			 * counts can get out of sync. +			 */ +		case WA_SEG_SUBMITTED: +		case WA_SEG_PENDING: +			/* +			 * Check if the abort was successfully sent.  This could +			 * be false if the HWA has been removed but we haven't +			 * gotten the disconnect notification yet. +			 */ +			if (!xfer_abort_pending) { +				seg->status = WA_SEG_ABORTED; +				rpipe_ready = rpipe_avail_inc(rpipe); +				xfer->segs_done++; +			} +			break;  		}  	} +	spin_unlock(&rpipe->seg_lock);  	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */ -	__wa_xfer_is_done(xfer); +	done = __wa_xfer_is_done(xfer);  	spin_unlock_irqrestore(&xfer->lock, flags); -	wa_xfer_completion(xfer); +	if (done) +		wa_xfer_completion(xfer);  	if (rpipe_ready)  		wa_xfer_delayed_run(rpipe); -	return 0; +	wa_xfer_put(xfer); +	return result;  out_unlock:  	spin_unlock_irqrestore(&xfer->lock, flags); -out: -	return 0; +	wa_xfer_put(xfer); +	return result;  dequeue_delayed:  	list_del_init(&xfer->list_node); @@ -1355,6 +2060,7 @@ dequeue_delayed:  	xfer->result = urb->status;  	spin_unlock_irqrestore(&xfer->lock, flags);  	wa_xfer_giveback(xfer); +	wa_xfer_put(xfer);  	usb_put_urb(urb);		/* we got a ref in enqueue() */  	return 0;  } @@ -1383,7 +2089,7 @@ static int wa_xfer_status_to_errno(u8 status)  		[WA_XFER_STATUS_NOT_FOUND] =		0,  		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,  		[WA_XFER_STATUS_TRANSACTION_ERROR] = 	-EILSEQ, -		[WA_XFER_STATUS_ABORTED] = 		-EINTR, +		[WA_XFER_STATUS_ABORTED] =		-ENOENT,  		[WA_XFER_STATUS_RPIPE_NOT_READY] = 	EINVAL,  		[WA_XFER_INVALID_FORMAT] = 		EINVAL,  		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = 	EINVAL, @@ -1410,24 +2116,178 @@ static int wa_xfer_status_to_errno(u8 status)  }  /* + * If a last segment flag and/or a transfer result error is encountered, + * no other segment transfer results will be returned from the device. + * Mark the remaining submitted or pending xfers as completed so that + * the xfer will complete cleanly. + * + * xfer->lock must be held + * + */ +static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, +		int starting_index, enum wa_seg_status status) +{ +	int index; +	struct wa_rpipe *rpipe = xfer->ep->hcpriv; + +	for (index = starting_index; index < xfer->segs_submitted; index++) { +		struct wa_seg *current_seg = xfer->seg[index]; + +		BUG_ON(current_seg == NULL); + +		switch (current_seg->status) { +		case WA_SEG_SUBMITTED: +		case WA_SEG_PENDING: +		case WA_SEG_DTI_PENDING: +			rpipe_avail_inc(rpipe); +		/* +		 * do not increment RPIPE avail for the WA_SEG_DELAYED case +		 * since it has not been submitted to the RPIPE. +		 */ +		case WA_SEG_DELAYED: +			xfer->segs_done++; +			current_seg->status = status; +			break; +		case WA_SEG_ABORTED: +			break; +		default: +			WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n", +				__func__, wa_xfer_id(xfer), index, +				current_seg->status); +			break; +		} +	} +} + +/* Populate the given urb based on the current isoc transfer state. 
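The coalescing rule used by __wa_populate_buf_in_urb_isoc below (merge a received frame with the next one while they are physically contiguous and the current frame's length is a whole number of DTI max-size packets) can be isolated into a small helper. Types and names here are illustrative, not the driver's.

#include <stddef.h>

struct rx_frame {
	size_t offset;
	size_t actual_length;
};

/*
 * Starting at 'start' (which must hold data), count how many frames a
 * single bulk-in read can drain, and return the combined byte count.
 */
static int frames_per_read(const struct rx_frame *f, int start, int nframes,
			   size_t dti_maxp, size_t *total)
{
	int i = start;

	*total = 0;
	do {
		*total += f[i].actual_length;
		i++;
	} while (i < nframes &&
		 f[i - 1].offset + f[i - 1].actual_length == f[i].offset &&
		 (f[i - 1].actual_length % dti_maxp) == 0);
	return i - start;
}

Fewer, larger reads matter here because each buf_in URB costs a full round trip on the DTI endpoint.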
*/ +static int __wa_populate_buf_in_urb_isoc(struct wahc *wa, +	struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg) +{ +	int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset; +	int seg_index, total_len = 0, urb_frame_index = urb_start_frame; +	struct usb_iso_packet_descriptor *iso_frame_desc = +						xfer->urb->iso_frame_desc; +	const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd); +	int next_frame_contiguous; +	struct usb_iso_packet_descriptor *iso_frame; + +	BUG_ON(buf_in_urb->status == -EINPROGRESS); + +	/* +	 * If the current frame actual_length is contiguous with the next frame +	 * and actual_length is a multiple of the DTI endpoint max packet size, +	 * combine the current frame with the next frame in a single URB.  This +	 * reduces the number of URBs that must be submitted in that case. +	 */ +	seg_index = seg->isoc_frame_index; +	do { +		next_frame_contiguous = 0; + +		iso_frame = &iso_frame_desc[urb_frame_index]; +		total_len += iso_frame->actual_length; +		++urb_frame_index; +		++seg_index; + +		if (seg_index < seg->isoc_frame_count) { +			struct usb_iso_packet_descriptor *next_iso_frame; + +			next_iso_frame = &iso_frame_desc[urb_frame_index]; + +			if ((iso_frame->offset + iso_frame->actual_length) == +				next_iso_frame->offset) +				next_frame_contiguous = 1; +		} +	} while (next_frame_contiguous +			&& ((iso_frame->actual_length % dti_packet_size) == 0)); + +	/* this should always be 0 before a resubmit. */ +	buf_in_urb->num_mapped_sgs	= 0; +	buf_in_urb->transfer_dma = xfer->urb->transfer_dma + +		iso_frame_desc[urb_start_frame].offset; +	buf_in_urb->transfer_buffer_length = total_len; +	buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; +	buf_in_urb->transfer_buffer = NULL; +	buf_in_urb->sg = NULL; +	buf_in_urb->num_sgs = 0; +	buf_in_urb->context = seg; + +	/* return the number of frames included in this URB. */ +	return seg_index - seg->isoc_frame_index; +} + +/* Populate the given urb based on the current transfer state. */ +static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer, +	unsigned int seg_idx, unsigned int bytes_transferred) +{ +	int result = 0; +	struct wa_seg *seg = xfer->seg[seg_idx]; + +	BUG_ON(buf_in_urb->status == -EINPROGRESS); +	/* this should always be 0 before a resubmit. */ +	buf_in_urb->num_mapped_sgs	= 0; + +	if (xfer->is_dma) { +		buf_in_urb->transfer_dma = xfer->urb->transfer_dma +			+ (seg_idx * xfer->seg_size); +		buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; +		buf_in_urb->transfer_buffer = NULL; +		buf_in_urb->sg = NULL; +		buf_in_urb->num_sgs = 0; +	} else { +		/* do buffer or SG processing. */ +		buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; + +		if (xfer->urb->transfer_buffer) { +			buf_in_urb->transfer_buffer = +				xfer->urb->transfer_buffer +				+ (seg_idx * xfer->seg_size); +			buf_in_urb->sg = NULL; +			buf_in_urb->num_sgs = 0; +		} else { +			/* allocate an SG list to store seg_size bytes +				and copy the subset of the xfer->urb->sg +				that matches the buffer subset we are +				about to read. 
*/ +			buf_in_urb->sg = wa_xfer_create_subset_sg( +				xfer->urb->sg, +				seg_idx * xfer->seg_size, +				bytes_transferred, +				&(buf_in_urb->num_sgs)); + +			if (!(buf_in_urb->sg)) { +				buf_in_urb->num_sgs	= 0; +				result = -ENOMEM; +			} +			buf_in_urb->transfer_buffer = NULL; +		} +	} +	buf_in_urb->transfer_buffer_length = bytes_transferred; +	buf_in_urb->context = seg; + +	return result; +} + +/*   * Process a xfer result completion message   * - * inbound transfers: need to schedule a DTI read + * inbound transfers: need to schedule a buf_in_urb read   *   * FIXME: this function needs to be broken up in parts   */ -static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer) +static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer, +		struct wa_xfer_result *xfer_result)  {  	int result;  	struct device *dev = &wa->usb_iface->dev;  	unsigned long flags; -	u8 seg_idx; +	unsigned int seg_idx;  	struct wa_seg *seg;  	struct wa_rpipe *rpipe; -	struct wa_xfer_result *xfer_result = wa->xfer_result; -	u8 done = 0; +	unsigned done = 0;  	u8 usb_status;  	unsigned rpipe_ready = 0; +	unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength); +	struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);  	spin_lock_irqsave(&xfer->lock, flags);  	seg_idx = xfer_result->bTransferSegment & 0x7f; @@ -1436,8 +2296,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)  	seg = xfer->seg[seg_idx];  	rpipe = xfer->ep->hcpriv;  	usb_status = xfer_result->bTransferStatus; -	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n", -		xfer, seg_idx, usb_status, seg->status); +	dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n", +		xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);  	if (seg->status == WA_SEG_ABORTED  	    || seg->status == WA_SEG_ERROR)	/* already handled */  		goto segment_aborted; @@ -1451,71 +2311,48 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)  	}  	if (usb_status & 0x80) {  		seg->result = wa_xfer_status_to_errno(usb_status); -		dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n", +		dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",  			xfer, xfer->id, seg->index, usb_status); +		seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ? +			WA_SEG_ABORTED : WA_SEG_ERROR;  		goto error_complete;  	}  	/* FIXME: we ignore warnings, tally them for stats */  	if (usb_status & 0x40) 		/* Warning?... */  		usb_status = 0;		/* ... pass */ -	if (xfer->is_inbound) {	/* IN data phase: read to buffer */ +	/* +	 * If the last segment bit is set, complete the remaining segments. +	 * When the current segment is completed, either in wa_buf_in_cb for +	 * transfers with data or below for no data, the xfer will complete. +	 */ +	if (xfer_result->bTransferSegment & 0x80) +		wa_complete_remaining_xfer_segs(xfer, seg->index + 1, +			WA_SEG_DONE); +	if (usb_pipeisoc(xfer->urb->pipe) +		&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) { +		/* set up WA state to read the isoc packet status next. */ +		wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer); +		wa->dti_isoc_xfer_seg = seg_idx; +		wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING; +	} else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe) +			&& (bytes_transferred > 0)) { +		/* IN data phase: read to buffer */  		seg->status = WA_SEG_DTI_PENDING; -		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS); -		/* this should always be 0 before a resubmit. 
*/ -		wa->buf_in_urb->num_mapped_sgs	= 0; - -		if (xfer->is_dma) { -			wa->buf_in_urb->transfer_dma = -				xfer->urb->transfer_dma -				+ (seg_idx * xfer->seg_size); -			wa->buf_in_urb->transfer_flags -				|= URB_NO_TRANSFER_DMA_MAP; -			wa->buf_in_urb->transfer_buffer = NULL; -			wa->buf_in_urb->sg = NULL; -			wa->buf_in_urb->num_sgs = 0; -		} else { -			/* do buffer or SG processing. */ -			wa->buf_in_urb->transfer_flags -				&= ~URB_NO_TRANSFER_DMA_MAP; - -			if (xfer->urb->transfer_buffer) { -				wa->buf_in_urb->transfer_buffer = -					xfer->urb->transfer_buffer -					+ (seg_idx * xfer->seg_size); -				wa->buf_in_urb->sg = NULL; -				wa->buf_in_urb->num_sgs = 0; -			} else { -				/* allocate an SG list to store seg_size bytes -					and copy the subset of the xfer->urb->sg -					that matches the buffer subset we are -					about to read. */ -				wa->buf_in_urb->sg = wa_xfer_create_subset_sg( -					xfer->urb->sg, -					seg_idx * xfer->seg_size, -					le32_to_cpu( -						xfer_result->dwTransferLength), -					&(wa->buf_in_urb->num_sgs)); - -				if (!(wa->buf_in_urb->sg)) { -					wa->buf_in_urb->num_sgs	= 0; -					goto error_sg_alloc; -				} -				wa->buf_in_urb->transfer_buffer = NULL; -			} -		} -		wa->buf_in_urb->transfer_buffer_length = -			le32_to_cpu(xfer_result->dwTransferLength); -		wa->buf_in_urb->context = seg; -		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC); +		result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx, +			bytes_transferred);  		if (result < 0) +			goto error_buf_in_populate; +		++(wa->active_buf_in_urbs); +		result = usb_submit_urb(buf_in_urb, GFP_ATOMIC); +		if (result < 0) { +			--(wa->active_buf_in_urbs);  			goto error_submit_buf_in; +		}  	} else { -		/* OUT data phase, complete it -- */ -		seg->status = WA_SEG_DONE; -		seg->result = le32_to_cpu(xfer_result->dwTransferLength); -		xfer->segs_done++; +		/* OUT data phase or no data, complete it -- */ +		seg->result = bytes_transferred;  		rpipe_ready = rpipe_avail_inc(rpipe); -		done = __wa_xfer_is_done(xfer); +		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);  	}  	spin_unlock_irqrestore(&xfer->lock, flags);  	if (done) @@ -1534,13 +2371,15 @@ error_submit_buf_in:  		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",  			xfer, seg_idx, result);  	seg->result = result; -	kfree(wa->buf_in_urb->sg); -error_sg_alloc: +	kfree(buf_in_urb->sg); +	buf_in_urb->sg = NULL; +error_buf_in_populate:  	__wa_xfer_abort(xfer); -error_complete:  	seg->status = WA_SEG_ERROR; +error_complete:  	xfer->segs_done++;  	rpipe_ready = rpipe_avail_inc(rpipe); +	wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);  	done = __wa_xfer_is_done(xfer);  	/*  	 * queue work item to clear STALL for control endpoints. @@ -1551,12 +2390,10 @@ error_complete:  		done) {  		dev_info(dev, "Control EP stall.  Queue delayed work.\n"); -		spin_lock_irq(&wa->xfer_list_lock); -		/* remove xfer from xfer_list. */ -		list_del(&xfer->list_node); -		/* add xfer to xfer_errored_list. */ -		list_add_tail(&xfer->list_node, &wa->xfer_errored_list); -		spin_unlock_irq(&wa->xfer_list_lock); +		spin_lock(&wa->xfer_list_lock); +		/* move xfer from xfer_list to xfer_errored_list. 
*/ +		list_move_tail(&xfer->list_node, &wa->xfer_errored_list); +		spin_unlock(&wa->xfer_list_lock);  		spin_unlock_irqrestore(&xfer->lock, flags);  		queue_work(wusbd, &wa->xfer_error_work);  	} else { @@ -1571,7 +2408,7 @@ error_complete:  error_bad_seg:  	spin_unlock_irqrestore(&xfer->lock, flags); -	wa_urb_dequeue(wa, xfer->urb); +	wa_urb_dequeue(wa, xfer->urb, -ENOENT);  	if (printk_ratelimit())  		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);  	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { @@ -1587,6 +2424,154 @@ segment_aborted:  }  /* + * Process a isochronous packet status message + * + * inbound transfers: need to schedule a buf_in_urb read + */ +static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb) +{ +	struct device *dev = &wa->usb_iface->dev; +	struct wa_xfer_packet_status_hwaiso *packet_status; +	struct wa_xfer_packet_status_len_hwaiso *status_array; +	struct wa_xfer *xfer; +	unsigned long flags; +	struct wa_seg *seg; +	struct wa_rpipe *rpipe; +	unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index; +	unsigned first_frame_index = 0, rpipe_ready = 0; +	int expected_size; + +	/* We have a xfer result buffer; check it */ +	dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n", +		urb->actual_length, urb->transfer_buffer); +	packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf); +	if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) { +		dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n", +			packet_status->bPacketType); +		goto error_parse_buffer; +	} +	xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress); +	if (xfer == NULL) { +		dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n", +			wa->dti_isoc_xfer_in_progress); +		goto error_parse_buffer; +	} +	spin_lock_irqsave(&xfer->lock, flags); +	if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs)) +		goto error_bad_seg; +	seg = xfer->seg[wa->dti_isoc_xfer_seg]; +	rpipe = xfer->ep->hcpriv; +	expected_size = sizeof(*packet_status) + +			(sizeof(packet_status->PacketStatus[0]) * +			seg->isoc_frame_count); +	if (urb->actual_length != expected_size) { +		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n", +			urb->actual_length, expected_size); +		goto error_bad_seg; +	} +	if (le16_to_cpu(packet_status->wLength) != expected_size) { +		dev_err(dev, "DTI Error: isoc packet status--bad length %u\n", +			le16_to_cpu(packet_status->wLength)); +		goto error_bad_seg; +	} +	/* write isoc packet status and lengths back to the xfer urb. */ +	status_array = packet_status->PacketStatus; +	xfer->urb->start_frame = +		wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd); +	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) { +		struct usb_iso_packet_descriptor *iso_frame_desc = +			xfer->urb->iso_frame_desc; +		const int xfer_frame_index = +			seg->isoc_frame_offset + seg_index; + +		iso_frame_desc[xfer_frame_index].status = +			wa_xfer_status_to_errno( +			le16_to_cpu(status_array[seg_index].PacketStatus)); +		iso_frame_desc[xfer_frame_index].actual_length = +			le16_to_cpu(status_array[seg_index].PacketLength); +		/* track the number of frames successfully transferred. */ +		if (iso_frame_desc[xfer_frame_index].actual_length > 0) { +			/* save the starting frame index for buf_in_urb. 
*/ +			if (!data_frame_count) +				first_frame_index = seg_index; +			++data_frame_count; +		} +	} + +	if (xfer->is_inbound && data_frame_count) { +		int result, total_frames_read = 0, urb_index = 0; +		struct urb *buf_in_urb; + +		/* IN data phase: read to buffer */ +		seg->status = WA_SEG_DTI_PENDING; + +		/* start with the first frame with data. */ +		seg->isoc_frame_index = first_frame_index; +		/* submit up to WA_MAX_BUF_IN_URBS read URBs. */ +		do { +			int urb_frame_index, urb_frame_count; +			struct usb_iso_packet_descriptor *iso_frame_desc; + +			buf_in_urb = &(wa->buf_in_urbs[urb_index]); +			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, +				buf_in_urb, xfer, seg); +			/* advance frame index to start of next read URB. */ +			seg->isoc_frame_index += urb_frame_count; +			total_frames_read += urb_frame_count; + +			++(wa->active_buf_in_urbs); +			result = usb_submit_urb(buf_in_urb, GFP_ATOMIC); + +			/* skip 0-byte frames. */ +			urb_frame_index = +				seg->isoc_frame_offset + seg->isoc_frame_index; +			iso_frame_desc = +				&(xfer->urb->iso_frame_desc[urb_frame_index]); +			while ((seg->isoc_frame_index < +						seg->isoc_frame_count) && +				 (iso_frame_desc->actual_length == 0)) { +				++(seg->isoc_frame_index); +				++iso_frame_desc; +			} +			++urb_index; + +		} while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS) +				&& (seg->isoc_frame_index < +						seg->isoc_frame_count)); + +		if (result < 0) { +			--(wa->active_buf_in_urbs); +			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)", +				result); +			wa_reset_all(wa); +		} else if (data_frame_count > total_frames_read) +			/* If we need to read more frames, set DTI busy. */ +			dti_busy = 1; +	} else { +		/* OUT transfer or no more IN data, complete it -- */ +		rpipe_ready = rpipe_avail_inc(rpipe); +		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE); +	} +	spin_unlock_irqrestore(&xfer->lock, flags); +	if (dti_busy) +		wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING; +	else +		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING; +	if (done) +		wa_xfer_completion(xfer); +	if (rpipe_ready) +		wa_xfer_delayed_run(rpipe); +	wa_xfer_put(xfer); +	return dti_busy; + +error_bad_seg: +	spin_unlock_irqrestore(&xfer->lock, flags); +	wa_xfer_put(xfer); +error_parse_buffer: +	return dti_busy; +} + +/*   * Callback for the IN data phase   *   * If successful transition state; otherwise, take a note of the @@ -1603,27 +2588,86 @@ static void wa_buf_in_cb(struct urb *urb)  	struct wahc *wa;  	struct device *dev;  	struct wa_rpipe *rpipe; -	unsigned rpipe_ready; +	unsigned rpipe_ready = 0, isoc_data_frame_count = 0;  	unsigned long flags; +	int resubmit_dti = 0, active_buf_in_urbs;  	u8 done = 0;  	/* free the sg if it was used. */  	kfree(urb->sg);  	urb->sg = NULL; +	spin_lock_irqsave(&xfer->lock, flags); +	wa = xfer->wa; +	dev = &wa->usb_iface->dev; +	--(wa->active_buf_in_urbs); +	active_buf_in_urbs = wa->active_buf_in_urbs; + +	if (usb_pipeisoc(xfer->urb->pipe)) { +		struct usb_iso_packet_descriptor *iso_frame_desc = +			xfer->urb->iso_frame_desc; +		int	seg_index; + +		/* +		 * Find the next isoc frame with data and count how many +		 * frames with data remain. 
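The scan described in this comment (advance to the next frame that actually carried data, and learn whether any remain) reads more clearly as a freestanding helper. The array-of-lengths interface is an assumption for illustration; the driver walks urb->iso_frame_desc instead.

#include <stddef.h>

/*
 * Advance *index to the next frame with data and return how many
 * data-bearing frames remain, so the caller knows whether more
 * bulk-in reads must be scheduled.
 */
static int count_data_frames(const size_t *actual_length, int count,
			     int *index)
{
	int i, remaining = 0;

	for (i = *index; i < count; i++) {
		if (actual_length[i] > 0) {
			if (remaining == 0)
				*index = i;	/* first frame with data */
			remaining++;
		}
	}
	return remaining;
}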
+		 */ +		seg_index = seg->isoc_frame_index; +		while (seg_index < seg->isoc_frame_count) { +			const int urb_frame_index = +				seg->isoc_frame_offset + seg_index; + +			if (iso_frame_desc[urb_frame_index].actual_length > 0) { +				/* save the index of the next frame with data */ +				if (!isoc_data_frame_count) +					seg->isoc_frame_index = seg_index; +				++isoc_data_frame_count; +			} +			++seg_index; +		} +	} +	spin_unlock_irqrestore(&xfer->lock, flags); +  	switch (urb->status) {  	case 0:  		spin_lock_irqsave(&xfer->lock, flags); -		wa = xfer->wa; -		dev = &wa->usb_iface->dev; -		rpipe = xfer->ep->hcpriv; -		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n", -			xfer, seg->index, (size_t)urb->actual_length); -		seg->status = WA_SEG_DONE; -		seg->result = urb->actual_length; -		xfer->segs_done++; -		rpipe_ready = rpipe_avail_inc(rpipe); -		done = __wa_xfer_is_done(xfer); + +		seg->result += urb->actual_length; +		if (isoc_data_frame_count > 0) { +			int result, urb_frame_count; + +			/* submit a read URB for the next frame with data. */ +			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb, +				 xfer, seg); +			/* advance index to start of next read URB. */ +			seg->isoc_frame_index += urb_frame_count; +			++(wa->active_buf_in_urbs); +			result = usb_submit_urb(urb, GFP_ATOMIC); +			if (result < 0) { +				--(wa->active_buf_in_urbs); +				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)", +					result); +				wa_reset_all(wa); +			} +			/* +			 * If we are in this callback and +			 * isoc_data_frame_count > 0, it means that the dti_urb +			 * submission was delayed in wa_dti_cb.  Once +			 * we submit the last buf_in_urb, we can submit the +			 * delayed dti_urb. +			 */ +			resubmit_dti = (isoc_data_frame_count == +						urb_frame_count); +		} else if (active_buf_in_urbs == 0) { +			rpipe = xfer->ep->hcpriv; +			dev_dbg(dev, +				"xfer %p 0x%08X#%u: data in done (%zu bytes)\n", +				xfer, wa_xfer_id(xfer), seg->index, +				seg->result); +			rpipe_ready = rpipe_avail_inc(rpipe); +			done = __wa_xfer_mark_seg_as_done(xfer, seg, +					WA_SEG_DONE); +		}  		spin_unlock_irqrestore(&xfer->lock, flags);  		if (done)  			wa_xfer_completion(xfer); @@ -1634,31 +2678,50 @@ static void wa_buf_in_cb(struct urb *urb)  	case -ENOENT:		/* as it was done by the one who unlinked us */  		break;  	default:		/* Other errors ... */ +		/* +		 * Error on data buf read.  Only resubmit DTI if it hasn't +		 * already been done by previously hitting this error or by a +		 * successful completion of the previous buf_in_urb. 
+		 */ +		resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;  		spin_lock_irqsave(&xfer->lock, flags); -		wa = xfer->wa; -		dev = &wa->usb_iface->dev;  		rpipe = xfer->ep->hcpriv;  		if (printk_ratelimit()) -			dev_err(dev, "xfer %p#%u: data in error %d\n", -				xfer, seg->index, urb->status); +			dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n", +				xfer, wa_xfer_id(xfer), seg->index, +				urb->status);  		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,  			    EDC_ERROR_TIMEFRAME)){  			dev_err(dev, "DTO: URB max acceptable errors "  				"exceeded, resetting device\n");  			wa_reset_all(wa);  		} -		seg->status = WA_SEG_ERROR;  		seg->result = urb->status; -		xfer->segs_done++;  		rpipe_ready = rpipe_avail_inc(rpipe); -		__wa_xfer_abort(xfer); -		done = __wa_xfer_is_done(xfer); +		if (active_buf_in_urbs == 0) +			done = __wa_xfer_mark_seg_as_done(xfer, seg, +				WA_SEG_ERROR); +		else +			__wa_xfer_abort(xfer);  		spin_unlock_irqrestore(&xfer->lock, flags);  		if (done)  			wa_xfer_completion(xfer);  		if (rpipe_ready)  			wa_xfer_delayed_run(rpipe);  	} + +	if (resubmit_dti) { +		int result; + +		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING; + +		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); +		if (result < 0) { +			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n", +				result); +			wa_reset_all(wa); +		} +	}  }  /* @@ -1687,56 +2750,65 @@ static void wa_buf_in_cb(struct urb *urb)   * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many   * errors) in the URBs.   */ -static void wa_xfer_result_cb(struct urb *urb) +static void wa_dti_cb(struct urb *urb)  { -	int result; +	int result, dti_busy = 0;  	struct wahc *wa = urb->context;  	struct device *dev = &wa->usb_iface->dev; -	struct wa_xfer_result *xfer_result;  	u32 xfer_id; -	struct wa_xfer *xfer;  	u8 usb_status;  	BUG_ON(wa->dti_urb != urb);  	switch (wa->dti_urb->status) {  	case 0: -		/* We have a xfer result buffer; check it */ -		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n", -			urb->actual_length, urb->transfer_buffer); -		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { -			dev_err(dev, "DTI Error: xfer result--bad size " -				"xfer result (%d bytes vs %zu needed)\n", -				urb->actual_length, sizeof(*xfer_result)); -			break; -		} -		xfer_result = wa->xfer_result; -		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) { -			dev_err(dev, "DTI Error: xfer result--" -				"bad header length %u\n", -				xfer_result->hdr.bLength); -			break; -		} -		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) { -			dev_err(dev, "DTI Error: xfer result--" -				"bad header type 0x%02x\n", -				xfer_result->hdr.bNotifyType); -			break; -		} -		usb_status = xfer_result->bTransferStatus & 0x3f; -		if (usb_status == WA_XFER_STATUS_NOT_FOUND) -			/* taken care of already */ -			break; -		xfer_id = xfer_result->dwTransferID; -		xfer = wa_xfer_get_by_id(wa, xfer_id); -		if (xfer == NULL) { -			/* FIXME: transaction might have been cancelled */ -			dev_err(dev, "DTI Error: xfer result--" -				"unknown xfer 0x%08x (status 0x%02x)\n", -				xfer_id, usb_status); -			break; +		if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) { +			struct wa_xfer_result *xfer_result; +			struct wa_xfer *xfer; + +			/* We have a xfer result buffer; check it */ +			dev_dbg(dev, "DTI: xfer result %d bytes at %p\n", +				urb->actual_length, urb->transfer_buffer); +			if (urb->actual_length != sizeof(*xfer_result)) { +				dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n", +					
urb->actual_length, +					sizeof(*xfer_result)); +				break; +			} +			xfer_result = (struct wa_xfer_result *)(wa->dti_buf); +			if (xfer_result->hdr.bLength != sizeof(*xfer_result)) { +				dev_err(dev, "DTI Error: xfer result--bad header length %u\n", +					xfer_result->hdr.bLength); +				break; +			} +			if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) { +				dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n", +					xfer_result->hdr.bNotifyType); +				break; +			} +			xfer_id = le32_to_cpu(xfer_result->dwTransferID); +			usb_status = xfer_result->bTransferStatus & 0x3f; +			if (usb_status == WA_XFER_STATUS_NOT_FOUND) { +				/* taken care of already */ +				dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n", +					__func__, xfer_id, +					xfer_result->bTransferSegment & 0x7f); +				break; +			} +			xfer = wa_xfer_get_by_id(wa, xfer_id); +			if (xfer == NULL) { +				/* FIXME: transaction not found. */ +				dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n", +					xfer_id, usb_status); +				break; +			} +			wa_xfer_result_chew(wa, xfer, xfer_result); +			wa_xfer_put(xfer); +		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) { +			dti_busy = wa_process_iso_packet_status(wa, urb); +		} else { +			dev_err(dev, "DTI Error: unexpected EP state = %d\n", +				wa->dti_state);  		} -		wa_xfer_result_chew(wa, xfer); -		wa_xfer_put(xfer);  		break;  	case -ENOENT:		/* (we killed the URB)...so, no broadcast */  	case -ESHUTDOWN:	/* going away! */ @@ -1755,18 +2827,69 @@ static void wa_xfer_result_cb(struct urb *urb)  			dev_err(dev, "DTI: URB error %d\n", urb->status);  		break;  	} -	/* Resubmit the DTI URB */ -	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); -	if (result < 0) { -		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), " -			"resetting\n", result); -		wa_reset_all(wa); + +	/* Resubmit the DTI URB if we are not busy processing isoc in frames. */ +	if (!dti_busy) { +		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); +		if (result < 0) { +			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n", +				result); +			wa_reset_all(wa); +		}  	}  out:  	return;  }  /* + * Initialize the DTI URB for reading transfer result notifications and also + * the buffer-in URB, for reading buffers. Then we just submit the DTI URB. 
+ */ +int wa_dti_start(struct wahc *wa) +{ +	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; +	struct device *dev = &wa->usb_iface->dev; +	int result = -ENOMEM, index; + +	if (wa->dti_urb != NULL)	/* DTI URB already started */ +		goto out; + +	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL); +	if (wa->dti_urb == NULL) { +		dev_err(dev, "Can't allocate DTI URB\n"); +		goto error_dti_urb_alloc; +	} +	usb_fill_bulk_urb( +		wa->dti_urb, wa->usb_dev, +		usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress), +		wa->dti_buf, wa->dti_buf_size, +		wa_dti_cb, wa); + +	/* init the buf in URBs */ +	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) { +		usb_fill_bulk_urb( +			&(wa->buf_in_urbs[index]), wa->usb_dev, +			usb_rcvbulkpipe(wa->usb_dev, +				0x80 | dti_epd->bEndpointAddress), +			NULL, 0, wa_buf_in_cb, wa); +	} +	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL); +	if (result < 0) { +		dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n", +			result); +		goto error_dti_urb_submit; +	} +out: +	return 0; + +error_dti_urb_submit: +	usb_put_urb(wa->dti_urb); +	wa->dti_urb = NULL; +error_dti_urb_alloc: +	return result; +} +EXPORT_SYMBOL_GPL(wa_dti_start); +/*   * Transfer complete notification   *   * Called from the notif.c code. We get a notification on EP2 saying @@ -1777,18 +2900,13 @@ out:   * don't really set it up and start it until the first xfer complete   * notification arrives, which is what we do here.   * - * Follow up in wa_xfer_result_cb(), as that's where the whole state + * Follow up in wa_dti_cb(), as that's where the whole state   * machine starts.   * - * So here we just initialize the DTI URB for reading transfer result - * notifications and also the buffer-in URB, for reading buffers. Then - * we just submit the DTI URB. - *   * @wa shall be referenced   */  void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)  { -	int result;  	struct device *dev = &wa->usb_iface->dev;  	struct wa_notif_xfer *notif_xfer;  	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; @@ -1802,44 +2920,13 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)  			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);  		goto error;  	} -	if (wa->dti_urb != NULL)	/* DTI URB already started */ -		goto out; -	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL); -	if (wa->dti_urb == NULL) { -		dev_err(dev, "Can't allocate DTI URB\n"); -		goto error_dti_urb_alloc; -	} -	usb_fill_bulk_urb( -		wa->dti_urb, wa->usb_dev, -		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), -		wa->xfer_result, wa->xfer_result_size, -		wa_xfer_result_cb, wa); +	/* attempt to start the DTI ep processing. 
*/ +	if (wa_dti_start(wa) < 0) +		goto error; -	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL); -	if (wa->buf_in_urb == NULL) { -		dev_err(dev, "Can't allocate BUF-IN URB\n"); -		goto error_buf_in_urb_alloc; -	} -	usb_fill_bulk_urb( -		wa->buf_in_urb, wa->usb_dev, -		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), -		NULL, 0, wa_buf_in_cb, wa); -	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL); -	if (result < 0) { -		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), " -			"resetting\n", result); -		goto error_dti_urb_submit; -	} -out:  	return; -error_dti_urb_submit: -	usb_put_urb(wa->buf_in_urb); -error_buf_in_urb_alloc: -	usb_put_urb(wa->dti_urb); -	wa->dti_urb = NULL; -error_dti_urb_alloc:  error:  	wa_reset_all(wa);  } diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c index 742c607d1fa..3e1ba51d1a4 100644 --- a/drivers/usb/wusbcore/wusbhc.c +++ b/drivers/usb/wusbcore/wusbhc.c @@ -55,7 +55,8 @@ static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)   * value of trust_timeout is jiffies.   */  static ssize_t wusb_trust_timeout_show(struct device *dev, -				       struct device_attribute *attr, char *buf) +					struct device_attribute *attr, +					char *buf)  {  	struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); @@ -173,7 +174,8 @@ static ssize_t wusb_phy_rate_store(struct device *dev,  	wusbhc->phy_rate = phy_rate;  	return size;  } -static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, wusb_phy_rate_store); +static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, +			wusb_phy_rate_store);  static ssize_t wusb_dnts_show(struct device *dev,  				  struct device_attribute *attr, @@ -227,7 +229,8 @@ static ssize_t wusb_retry_count_store(struct device *dev,  	if (result != 1)  		return -EINVAL; -	wusbhc->retry_count = max_t(uint8_t, retry_count, WUSB_RETRY_COUNT_MAX); +	wusbhc->retry_count = max_t(uint8_t, retry_count, +					WUSB_RETRY_COUNT_MAX);  	return size;  } @@ -321,7 +324,8 @@ int wusbhc_b_create(struct wusbhc *wusbhc)  	result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);  	if (result < 0) { -		dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result); +		dev_err(dev, "Cannot register WUSBHC attributes: %d\n", +			result);  		goto error_create_attr_group;  	} @@ -419,13 +423,14 @@ EXPORT_SYMBOL_GPL(wusb_cluster_id_put);   *  - After a successful transfer, update the trust timeout timestamp   *    for the WUSB device.   * - *  - [WUSB] sections 4.13 and 7.5.1 specifies the stop retrasmittion + *  - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission   *    condition for the WCONNECTACK_IE is that the host has observed   *    the associated device responding to a control transfer.   
*/  void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)  { -	struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); +	struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, +					urb->dev);  	if (status == 0 && wusb_dev) {  		wusb_dev->entry_ts = jiffies; diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h index 711b1952b11..2384add4537 100644 --- a/drivers/usb/wusbcore/wusbhc.h +++ b/drivers/usb/wusbcore/wusbhc.h @@ -97,6 +97,7 @@ struct wusb_dev {  	struct kref refcnt;  	struct wusbhc *wusbhc;  	struct list_head cack_node;	/* Connect-Ack list */ +	struct list_head rekey_node;	/* GTK rekey list */  	u8 port_idx;  	u8 addr;  	u8 beacon_type:4; @@ -107,8 +108,6 @@ struct wusb_dev {  	struct usb_wireless_cap_descriptor *wusb_cap_descr;  	struct uwb_mas_bm availability;  	struct work_struct devconnect_acked_work; -	struct urb *set_gtk_urb; -	struct usb_ctrlrequest *set_gtk_req;  	struct usb_device *usb_dev;  }; @@ -165,7 +164,7 @@ struct wusb_port {   * functions/operations that only deal with general Wireless USB HC   * issues use this data type to refer to the host.   * - * @usb_hcd 	   Instantiation of a USB host controller + * @usb_hcd	   Instantiation of a USB host controller   *                 (initialized by upper layer [HWA=HC or WHCI].   *   * @dev		   Device that implements this; initialized by the @@ -197,7 +196,7 @@ struct wusb_port {   * @ports_max	   Number of simultaneous device connections (fake   *                 ports) this HC will take. Read-only.   * - * @port      	   Array of port status for each fake root port. Guaranteed to + * @port	   Array of port status for each fake root port. Guaranteed to   *                 always be the same length during device existence   *                 [this allows for some unlocked but referenced reading].   * @@ -296,8 +295,7 @@ struct wusbhc {  	} __attribute__((packed)) gtk;  	u8 gtk_index;  	u32 gtk_tkid; -	struct work_struct gtk_rekey_done_work; -	int pending_set_gtks; +	struct work_struct gtk_rekey_work;  	struct usb_encryption_descriptor *ccm1_etd;  }; @@ -331,7 +329,8 @@ void wusbhc_pal_unregister(struct wusbhc *wusbhc);   * This is a safe assumption as @usb_dev->bus is referenced all the   * time during the @usb_dev life cycle.   */ -static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev) +static inline +struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)  {  	struct usb_hcd *usb_hcd;  	usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);
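
The DTI rework above hinges on one idea: a single completion handler, wa_dti_cb(), that interprets what arrived on the DTI endpoint according to wa->dti_state instead of assuming every message is a wa_xfer_result. A minimal userspace reduction of that dispatch is sketched below; it assumes only the three WA_DTI_* states visible in the hunks, and the two handlers are hypothetical stand-ins for wa_xfer_result_chew() and wa_process_iso_packet_status().

#include <stdio.h>

/* the three DTI endpoint states introduced by the patch */
enum wa_dti_state {
	WA_DTI_TRANSFER_RESULT_PENDING,
	WA_DTI_ISOC_PACKET_STATUS_PENDING,
	WA_DTI_BUF_IN_DATA_PENDING,
};

/* stand-in for wa_xfer_result_chew(): never leaves the DTI busy */
static int handle_xfer_result(void)
{
	return 0;
}

/* stand-in for wa_process_iso_packet_status(): buf_in reads queued */
static int handle_iso_packet_status(void)
{
	return 1;	/* dti_busy: buf_in URBs own the pipe for now */
}

/*
 * Mirrors the switch at the top of wa_dti_cb(): returns nonzero
 * ("dti_busy") when the DTI URB must not be resubmitted yet, because
 * the pending buf_in reads will resubmit it from wa_buf_in_cb().
 */
static int dti_dispatch(enum wa_dti_state state)
{
	switch (state) {
	case WA_DTI_TRANSFER_RESULT_PENDING:
		return handle_xfer_result();
	case WA_DTI_ISOC_PACKET_STATUS_PENDING:
		return handle_iso_packet_status();
	default:
		fprintf(stderr, "DTI Error: unexpected EP state = %d\n",
			state);
		return 0;
	}
}

int main(void)
{
	if (!dti_dispatch(WA_DTI_TRANSFER_RESULT_PENDING))
		printf("resubmit DTI URB\n");
	if (dti_dispatch(WA_DTI_ISOC_PACKET_STATUS_PENDING))
		printf("DTI busy; wa_buf_in_cb() resubmits it later\n");
	return 0;
}

When the last buf_in URB completes, wa_buf_in_cb() sets dti_state back to WA_DTI_TRANSFER_RESULT_PENDING and resubmits the DTI URB, which is exactly the resubmit_dti block in the hunks above.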

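Both wa_process_iso_packet_status() and wa_buf_in_cb() repeat one scan: walk the transfer URB's iso_frame_desc array from the segment's current position, skip frames that completed with zero bytes, and remember where the next inbound read must start. Reduced to a standalone helper, under the assumption that only the actual_length field matters (the struct below is a stub for struct usb_iso_packet_descriptor):

#include <stdio.h>

/* stub: the one descriptor field the scan consults */
struct iso_desc {
	unsigned int actual_length;
};

/*
 * Starting at *frame_index, count the frames that completed with data
 * and advance *frame_index to the first of them, mirroring the loops
 * in the patch (the driver additionally adds seg->isoc_frame_offset
 * when it indexes the URB's descriptor array).
 */
static int next_frames_with_data(const struct iso_desc *desc,
				 int frame_count, int *frame_index)
{
	int i, with_data = 0;

	for (i = *frame_index; i < frame_count; i++) {
		if (desc[i].actual_length > 0) {
			if (!with_data)
				*frame_index = i; /* first frame with data */
			++with_data;
		}
	}
	return with_data;
}

int main(void)
{
	struct iso_desc frames[] = { {0}, {0}, {512}, {0}, {128} };
	int idx = 0;
	int n = next_frames_with_data(frames, 5, &idx);

	printf("%d frames with data, next at index %d\n", n, idx);
	return 0;	/* prints: 2 frames with data, next at index 2 */
}

In the driver, a nonzero count is what keeps the segment in WA_SEG_DTI_PENDING and triggers another __wa_populate_buf_in_urb_isoc() call, while the saved index becomes seg->isoc_frame_index for that next read.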