Diffstat (limited to 'drivers/usb/wusbcore')
-rw-r--r--  drivers/usb/wusbcore/cbaf.c          22
-rw-r--r--  drivers/usb/wusbcore/crypto.c         2
-rw-r--r--  drivers/usb/wusbcore/devconnect.c     8
-rw-r--r--  drivers/usb/wusbcore/mmc.c           11
-rw-r--r--  drivers/usb/wusbcore/pal.c            1
-rw-r--r--  drivers/usb/wusbcore/reservation.c    1
-rw-r--r--  drivers/usb/wusbcore/security.c      42
-rw-r--r--  drivers/usb/wusbcore/wa-hc.c          2
-rw-r--r--  drivers/usb/wusbcore/wa-hc.h         51
-rw-r--r--  drivers/usb/wusbcore/wa-nep.c        10
-rw-r--r--  drivers/usb/wusbcore/wa-rpipe.c      25
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c      930
-rw-r--r--  drivers/usb/wusbcore/wusbhc.c        17
-rw-r--r--  drivers/usb/wusbcore/wusbhc.h         7
14 files changed, 781 insertions, 348 deletions
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index f06ed82e63d..da1b872918b 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -144,7 +144,7 @@ static int cbaf_check(struct cbaf *cbaf)
CBAF_REQ_GET_ASSOCIATION_INFORMATION,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+ cbaf->buffer, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Cannot get available association types: %d\n",
result);
@@ -184,7 +184,7 @@ static int cbaf_check(struct cbaf *cbaf)
assoc_request = itr;
if (top - itr < sizeof(*assoc_request)) {
- dev_err(dev, "Not enough data to decode associaton "
+ dev_err(dev, "Not enough data to decode association "
"request (%zu vs %zu bytes needed)\n",
top - itr, sizeof(*assoc_request));
break;
@@ -235,7 +235,7 @@ static int cbaf_check(struct cbaf *cbaf)
static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
- .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
.CHID_hdr = WUSB_AR_CHID,
@@ -260,12 +260,13 @@ static int cbaf_send_host_info(struct cbaf *cbaf)
hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
hi_size = sizeof(*hi) + name_len;
- return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
+ return usb_control_msg(cbaf->usb_dev,
+ usb_sndctrlpipe(cbaf->usb_dev, 0),
CBAF_REQ_SET_ASSOCIATION_RESPONSE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0101,
cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- hi, hi_size, 1000 /* FIXME: arbitrary */);
+ hi, hi_size, USB_CTRL_SET_TIMEOUT);
}
/*
@@ -288,9 +289,10 @@ static int cbaf_cdid_get(struct cbaf *cbaf)
CBAF_REQ_GET_ASSOCIATION_REQUEST,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+ di, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
if (result < 0) {
- dev_err(dev, "Cannot request device information: %d\n", result);
+ dev_err(dev, "Cannot request device information: %d\n",
+ result);
return result;
}
@@ -491,11 +493,11 @@ static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
- .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
.Length_hdr = WUSB_AR_Length,
- .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
+ .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
.ConnectionContext_hdr = WUSB_AR_ConnectionContext,
.BandGroups_hdr = WUSB_AR_BandGroups,
};
@@ -536,7 +538,7 @@ static int cbaf_cc_upload(struct cbaf *cbaf)
CBAF_REQ_SET_ASSOCIATION_RESPONSE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */);
+ ccd, sizeof(*ccd), USB_CTRL_SET_TIMEOUT);
return result;
}
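
The recurring change in cbaf.c above swaps hard-coded 1000 ms control-transfer timeouts for the USB_CTRL_GET_TIMEOUT/USB_CTRL_SET_TIMEOUT constants from <linux/usb.h>. A minimal sketch of the pattern, with a hypothetical request number (not part of this patch):

	#include <linux/usb.h>

	/* Sketch only: 0x01 is a made-up class request; the point is the
	 * named timeout replacing the arbitrary 1000 ms literal. */
	static int example_class_get(struct usb_device *udev, u8 iface,
				     void *buf, u16 len)
	{
		return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01,
			USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			0, iface, buf, len, USB_CTRL_GET_TIMEOUT);
	}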
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 7e4bf95f8f7..9a95b2dc6d1 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -87,7 +87,7 @@ struct aes_ccm_block {
* B1 contains l(a), the MAC header, the encryption offset and padding.
*
* If EO is nonzero, additional blocks are built from payload bytes
- * until EO is exahusted (FIXME: padding to 16 bytes, I guess). The
+ * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
* padding is not xmitted.
*/
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index f14e7929ba2..0677139c606 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -265,9 +265,9 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work)
* Addresses: because WUSB hosts have no downstream hubs, we can do a
* 1:1 mapping between 'port number' and device
* address. This simplifies many things, as during this
- * initial connect phase the USB stack has no knoledge of
+ * initial connect phase the USB stack has no knowledge of
* the device and hasn't assigned an address yet--we know
- * USB's choose_address() will use the same euristics we
+ * USB's choose_address() will use the same heuristics we
* use here, so we can assume which address will be assigned.
*
* USB stack always assigns address 1 to the root hub, so
@@ -284,7 +284,7 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
struct device *dev = wusbhc->dev;
struct wusb_dev *wusb_dev;
struct wusb_port *port;
- unsigned idx, devnum;
+ unsigned idx;
mutex_lock(&wusbhc->mutex);
@@ -312,8 +312,6 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
goto error_unlock;
}
- devnum = idx + 2;
-
/* Make sure we are using no crypto on that "virtual port" */
wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
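
For reference, the devnum variable removed above was a leftover of the 1:1 port-to-address mapping described in the comment (the root hub takes address 1, so port idx maps to idx + 2); it was computed but never used. As an illustrative helper (not in the driver):

	static inline u8 example_port_to_devaddr(unsigned int idx)
	{
		/* address 1 is the root hub, so "virtual port" 0 maps to 2 */
		return idx + 2;
	}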
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index b71760c8d3a..3f485df9622 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -206,13 +206,15 @@ int wusbhc_start(struct wusbhc *wusbhc)
result = wusbhc_devconnect_start(wusbhc);
if (result < 0) {
- dev_err(dev, "error enabling device connections: %d\n", result);
+ dev_err(dev, "error enabling device connections: %d\n",
+ result);
goto error_devconnect_start;
}
result = wusbhc_sec_start(wusbhc);
if (result < 0) {
- dev_err(dev, "error starting security in the HC: %d\n", result);
+ dev_err(dev, "error starting security in the HC: %d\n",
+ result);
goto error_sec_start;
}
@@ -284,7 +286,8 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
wusbhc->uwb_rc = uwb_rc_get_by_grandpa(wusbhc->dev->parent);
if (wusbhc->uwb_rc == NULL) {
result = -ENODEV;
- dev_err(wusbhc->dev, "Cannot get associated UWB Host Controller\n");
+ dev_err(wusbhc->dev,
+ "Cannot get associated UWB Host Controller\n");
goto error_rc_get;
}
@@ -298,7 +301,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
if (chid)
result = uwb_radio_start(&wusbhc->pal);
- else
+ else if (wusbhc->uwb_rc)
uwb_radio_stop(&wusbhc->pal);
return result;
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c
index 59e100c2eb5..090f27371a8 100644
--- a/drivers/usb/wusbcore/pal.c
+++ b/drivers/usb/wusbcore/pal.c
@@ -22,6 +22,7 @@ static void wusbhc_channel_changed(struct uwb_pal *pal, int channel)
{
struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal);
+ dev_dbg(wusbhc->dev, "%s: channel = %d\n", __func__, channel);
if (channel < 0)
wusbhc_stop(wusbhc);
else
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
index ead79f79392..d5efd0f07d2 100644
--- a/drivers/usb/wusbcore/reservation.c
+++ b/drivers/usb/wusbcore/reservation.c
@@ -51,6 +51,7 @@ static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
struct uwb_mas_bm mas;
char buf[72];
+ dev_dbg(dev, "%s: state = %d\n", __func__, rsv->state);
switch (rsv->state) {
case UWB_RSV_STATE_O_ESTABLISHED:
uwb_rsv_get_usable_mas(rsv, &mas);
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 4c40d0dbf53..95be9953cd4 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -33,7 +33,8 @@ static void wusbhc_gtk_rekey_work(struct work_struct *work);
int wusbhc_sec_create(struct wusbhc *wusbhc)
{
- wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
+ wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) +
+ sizeof(wusbhc->gtk.data);
wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
wusbhc->gtk.descr.bReserved = 0;
wusbhc->gtk_index = 0;
@@ -56,7 +57,7 @@ void wusbhc_sec_destroy(struct wusbhc *wusbhc)
* @wusb_dev: the device whose PTK the TKID is for
* (or NULL for a TKID for a GTK)
*
- * The generated TKID consist of two parts: the device's authenicated
+ * The generated TKID consists of two parts: the device's authenticated
* address (or 0 or a GTK); and an incrementing number. This ensures
* that TKIDs cannot be shared between devices and by the time the
* incrementing number wraps around the older TKIDs will no longer be
@@ -138,7 +139,7 @@ const char *wusb_et_name(u8 x)
case USB_ENC_TYPE_WIRED: return "wired";
case USB_ENC_TYPE_CCM_1: return "CCM-1";
case USB_ENC_TYPE_RSA_1: return "RSA-1";
- default: return "unknown";
+ default: return "unknown";
}
}
EXPORT_SYMBOL_GPL(wusb_et_name);
@@ -165,7 +166,7 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- value, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+ value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "Can't set device's WUSB encryption to "
"%s (value %d): %d\n",
@@ -191,7 +192,7 @@ static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_DT_KEY << 8 | key_index, 0,
&wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
- 1000);
+ USB_CTRL_SET_TIMEOUT);
}
@@ -222,7 +223,8 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
secd_size = le16_to_cpu(secd->wTotalLength);
new_secd = krealloc(secd, secd_size, GFP_KERNEL);
if (new_secd == NULL) {
- dev_err(dev, "Can't allocate space for security descriptors\n");
+ dev_err(dev,
+ "Can't allocate space for security descriptors\n");
goto out;
}
secd = new_secd;
@@ -301,8 +303,9 @@ int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
/* Set address 0 */
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_ADDRESS, 0,
- 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+ USB_REQ_SET_ADDRESS,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "auth failed: can't set address 0: %d\n",
result);
@@ -316,9 +319,10 @@ int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
/* Set new (authenticated) address. */
result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_ADDRESS, 0,
- new_address, 0, NULL, 0,
- 1000 /* FIXME: arbitrary */);
+ USB_REQ_SET_ADDRESS,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ new_address, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "auth failed: can't set address %u: %d\n",
new_address, result);
@@ -375,13 +379,13 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
hs[0].bReserved = 0;
memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
- memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
+ memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
result = usb_control_msg(
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_HANDSHAKE,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */);
+ 1, 0, &hs[0], sizeof(hs[0]), USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake1: request failed: %d\n", result);
goto error_hs1;
@@ -392,7 +396,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
usb_dev, usb_rcvctrlpipe(usb_dev, 0),
USB_REQ_GET_HANDSHAKE,
USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */);
+ 2, 0, &hs[1], sizeof(hs[1]), USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake2: request failed: %d\n", result);
goto error_hs2;
@@ -422,7 +426,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
}
/* Setup the CCM nonce */
- memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
+ memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
ccm_n.dest_addr.data[0] = wusb_dev->addr;
@@ -469,7 +473,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
usb_dev, usb_sndctrlpipe(usb_dev, 0),
USB_REQ_SET_HANDSHAKE,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */);
+ 3, 0, &hs[2], sizeof(hs[2]), USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "Handshake3: request failed: %d\n", result);
goto error_hs3;
@@ -553,11 +557,13 @@ static void wusbhc_gtk_rekey_work(struct work_struct *work)
list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list,
rekey_node) {
list_del_init(&wusb_dev->rekey_node);
- dev_dbg(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d\n",
+ dev_dbg(&wusb_dev->usb_dev->dev,
+ "%s: rekey device at port %d\n",
__func__, wusb_dev->port_idx);
if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) {
- dev_err(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d failed\n",
+ dev_err(&wusb_dev->usb_dev->dev,
+ "%s: rekey device at port %d failed\n",
__func__, wusb_dev->port_idx);
}
wusb_dev_put(wusb_dev);
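
The two SET_ADDRESS hunks above replace a bare 0 with an explicit bmRequestType. USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE evaluates to 0, so the wire format is unchanged; the request is now self-documenting. Condensed sketch of the corrected call:

	/* standard SET_ADDRESS as issued from wusb_dev_update_address() */
	static int example_set_address(struct usb_device *udev, u16 addr)
	{
		return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			USB_REQ_SET_ADDRESS,
			USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
			addr, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
	}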
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 368360f9a93..252c7bd9218 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -75,8 +75,6 @@ void __wa_destroy(struct wahc *wa)
if (wa->dti_urb) {
usb_kill_urb(wa->dti_urb);
usb_put_urb(wa->dti_urb);
- usb_kill_urb(wa->buf_in_urb);
- usb_put_urb(wa->buf_in_urb);
}
kfree(wa->dti_buf);
wa_nep_destroy(wa);
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index e614f02f0cf..f2a8d29e17b 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -36,7 +36,7 @@
*
* hcd glue with the USB API Host Controller Interface API.
*
- * nep Notification EndPoint managent: collect notifications
+ * nep Notification EndPoint management: collect notifications
* and queue them with the workqueue daemon.
*
* Handle notifications as coming from the NEP. Sends them
@@ -125,7 +125,8 @@ struct wa_rpipe {
enum wa_dti_state {
WA_DTI_TRANSFER_RESULT_PENDING,
- WA_DTI_ISOC_PACKET_STATUS_PENDING
+ WA_DTI_ISOC_PACKET_STATUS_PENDING,
+ WA_DTI_BUF_IN_DATA_PENDING
};
enum wa_quirks {
@@ -134,8 +135,20 @@ enum wa_quirks {
* requests to be concatenated and not sent as separate packets.
*/
WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC = 0x01,
+ /*
+ * The Alereon HWA can be instructed to not send transfer notifications
+ * as an optimization.
+ */
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS = 0x02,
};
+enum wa_vendor_specific_requests {
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C,
+ WA_REQ_ALEREON_FEATURE_SET = 0x01,
+ WA_REQ_ALEREON_FEATURE_CLEAR = 0x00,
+};
+
+#define WA_MAX_BUF_IN_URBS 4
/**
* Instance of a HWA Host Controller
*
@@ -144,7 +157,7 @@ enum wa_quirks {
*
* @wa_descr Can be accessed without locking because it is in
* the same area where the device descriptors were
- * read, so it is guaranteed to exist umodified while
+ * read, so it is guaranteed to exist unmodified while
* the device exists.
*
* Endianess has been converted to CPU's.
@@ -167,8 +180,8 @@ enum wa_quirks {
* submitted from an atomic context).
*
* FIXME: this needs to be layered up: a wusbhc layer (for sharing
- * comonalities with WHCI), a wa layer (for sharing
- * comonalities with DWA-RC).
+ * commonalities with WHCI), a wa layer (for sharing
+ * commonalities with DWA-RC).
*/
struct wahc {
struct usb_device *usb_dev;
@@ -197,21 +210,23 @@ struct wahc {
struct mutex rpipe_mutex; /* assigning resources to endpoints */
/*
- * dti_state is used to track the state of the dti_urb. When dti_state
+ * dti_state is used to track the state of the dti_urb. When dti_state
* is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and
- * dti_isoc_xfer_seg identify which xfer the incoming isoc packet status
- * refers to.
+ * dti_isoc_xfer_seg identify which xfer the incoming isoc packet
+ * status refers to.
*/
enum wa_dti_state dti_state;
u32 dti_isoc_xfer_in_progress;
u8 dti_isoc_xfer_seg;
struct urb *dti_urb; /* URB for reading xfer results */
- struct urb *buf_in_urb; /* URB for reading data in */
+ /* URBs for reading data in */
+ struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS];
+ int active_buf_in_urbs; /* number of buf_in_urbs active. */
struct edc dti_edc; /* DTI error density counter */
void *dti_buf;
size_t dti_buf_size;
- unsigned long dto_in_use; /* protect dto endoint serialization. */
+ unsigned long dto_in_use; /* protect dto endoint serialization */
s32 status; /* For reading status */
@@ -234,6 +249,7 @@ struct wahc {
extern int wa_create(struct wahc *wa, struct usb_interface *iface,
kernel_ulong_t);
extern void __wa_destroy(struct wahc *wa);
+extern int wa_dti_start(struct wahc *wa);
void wa_reset_all(struct wahc *wa);
@@ -275,6 +291,8 @@ static inline void wa_rpipe_init(struct wahc *wa)
static inline void wa_init(struct wahc *wa)
{
+ int index;
+
edc_init(&wa->nep_edc);
atomic_set(&wa->notifs_queued, 0);
wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
@@ -288,6 +306,10 @@ static inline void wa_init(struct wahc *wa)
INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
wa->dto_in_use = 0;
atomic_set(&wa->xfer_id_count, 1);
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
+ usb_init_urb(&(wa->buf_in_urbs[index]));
+ wa->active_buf_in_urbs = 0;
}
/**
@@ -332,7 +354,7 @@ static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
/* Transferring data */
extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
struct urb *, gfp_t);
-extern int wa_urb_dequeue(struct wahc *, struct urb *);
+extern int wa_urb_dequeue(struct wahc *, struct urb *, int);
extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
@@ -345,7 +367,7 @@ extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
* it...no RC specific function is called...unless I miss
* something.
*
- * FIXME: has to go away in favour of an 'struct' hcd based sollution
+ * FIXME: has to go away in favour of a 'struct' hcd based solution
*/
static inline struct wahc *wa_get(struct wahc *wa)
{
@@ -366,7 +388,7 @@ static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
feature,
wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- NULL, 0, 1000 /* FIXME: arbitrary */);
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
}
@@ -400,8 +422,7 @@ s32 __wa_get_status(struct wahc *wa)
USB_REQ_GET_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
- &wa->status, sizeof(wa->status),
- 1000 /* FIXME: arbitrary */);
+ &wa->status, sizeof(wa->status), USB_CTRL_GET_TIMEOUT);
if (result >= 0)
result = wa->status;
return result;
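
The new WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS quirk and the WA_REQ_ALEREON_* requests are only declared in this header; the code that issues the request lives in the HWA glue and is not shown in this diff. A sketch of how such a vendor request would plausibly be sent (the vendor/interface bmRequestType here is an assumption, not confirmed by this patch):

	/* assumption: vendor-type OUT request addressed to the interface */
	if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS)
		usb_control_msg(wa->usb_dev,
			usb_sndctrlpipe(wa->usb_dev, 0),
			WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			WA_REQ_ALEREON_FEATURE_SET,
			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
			NULL, 0, USB_CTRL_SET_TIMEOUT);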
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
index ada4e087062..60a10d21947 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -69,8 +69,8 @@ struct wa_notif_work {
* [the wuswad daemon, basically]
*
* @_nw: Pointer to a descriptor which has the pointer to the
- * @wa, the size of the buffer and the work queue
- * structure (so we can free all when done).
+ * @wa, the size of the buffer and the work queue
+ * structure (so we can free all when done).
* @returns 0 if ok, < 0 errno code on error.
*
* All notifications follow the same format; they need to start with a
@@ -93,7 +93,8 @@ static void wa_notif_dispatch(struct work_struct *ws)
{
void *itr;
u8 missing = 0;
- struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work);
+ struct wa_notif_work *nw = container_of(ws, struct wa_notif_work,
+ work);
struct wahc *wa = nw->wa;
struct wa_notif_hdr *notif_hdr;
size_t size;
@@ -271,7 +272,8 @@ int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
wa->nep_buffer_size = 1024;
wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
if (wa->nep_buffer == NULL) {
- dev_err(dev, "Unable to allocate notification's read buffer\n");
+ dev_err(dev,
+ "Unable to allocate notification's read buffer\n");
goto error_nep_buffer;
}
wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
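
The container_of reflow above is the standard embedded-work idiom: the work_struct is a member of wa_notif_work, so the callback can recover the wrapper that was allocated when the work was queued. Generic sketch:

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct example_work {
		struct work_struct work;	/* embedded, not a pointer */
		void *payload;
	};

	static void example_dispatch(struct work_struct *ws)
	{
		struct example_work *ew =
			container_of(ws, struct example_work, work);

		/* consume ew->payload, then free the wrapper */
		kfree(ew);
	}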
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index b48e74cc54d..a80c5d284b5 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -57,7 +57,6 @@
* urb->dev->devnum, to make sure that we always have the right
* destination address.
*/
-#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
@@ -80,7 +79,7 @@ static int __rpipe_get_descr(struct wahc *wa,
USB_REQ_GET_DESCRIPTOR,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
- 1000 /* FIXME: arbitrary */);
+ USB_CTRL_GET_TIMEOUT);
if (result < 0) {
dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
index, (int)result);
@@ -118,7 +117,7 @@ static int __rpipe_set_descr(struct wahc *wa,
USB_REQ_SET_DESCRIPTOR,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
- HZ / 10);
+ USB_CTRL_SET_TIMEOUT);
if (result < 0) {
dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
index, (int)result);
@@ -184,7 +183,7 @@ EXPORT_SYMBOL_GPL(rpipe_destroy);
/*
* Locate an idle rpipe, create an structure for it and return it
*
- * @wa is referenced and unlocked
+ * @wa is referenced and unlocked
* @crs enum rpipe_attr, required endpoint characteristics
*
* The rpipe can be used only sequentially (not in parallel).
@@ -237,7 +236,7 @@ static int __rpipe_reset(struct wahc *wa, unsigned index)
wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_RESET,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+ 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (result < 0)
dev_err(dev, "rpipe %u: reset failed: %d\n",
index, result);
@@ -299,7 +298,7 @@ static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
break;
}
itr += hdr->bLength;
- itr_size -= hdr->bDescriptorType;
+ itr_size -= hdr->bLength;
}
out:
return epcd;
@@ -308,7 +307,7 @@ out:
/*
* Aim an rpipe to its device & endpoint destination
*
- * Make sure we change the address to unauthenticathed if the device
+ * Make sure we change the address to unauthenticated if the device
* is WUSB and it is not authenticated.
*/
static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
@@ -329,7 +328,8 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
}
unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
__rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
- atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
+ atomic_set(&rpipe->segs_available,
+ le16_to_cpu(rpipe->descr.wRequests));
/* FIXME: block allocation system; request with queuing and timeout */
/* FIXME: compute so seg_size > ep->maxpktsize */
rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
@@ -524,10 +524,10 @@ void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
usb_control_msg(
- wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_ABORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+ 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
rpipe_put(rpipe);
}
mutex_unlock(&wa->rpipe_mutex);
@@ -545,12 +545,11 @@ void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
usb_control_msg(
- wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_CLEAR_FEATURE,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
- RPIPE_STALL, index, NULL, 0, 1000);
+ RPIPE_STALL, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
mutex_unlock(&wa->rpipe_mutex);
}
EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
-
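
The two pipe changes above fix a direction mismatch: USB_REQ_RPIPE_ABORT and CLEAR_FEATURE(RPIPE_STALL) both carry USB_DIR_OUT in bmRequestType, so they must be sent on usb_sndctrlpipe(), not the receive pipe. The rule in sketch form:

	/* the pipe direction must agree with bmRequestType's direction bit */
	static void example_rpipe_abort(struct usb_device *udev, u16 index)
	{
		usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			USB_REQ_RPIPE_ABORT,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
			0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
	}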
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index ed5abe87b04..3e2e4ed2015 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -79,7 +79,6 @@
* availability of the different required components (blocks,
* rpipes, segment slots, etc), we go scheduling them. Painful.
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
@@ -124,6 +123,8 @@ struct wa_seg {
u8 index; /* which segment we are */
int isoc_frame_count; /* number of isoc frames in this segment. */
int isoc_frame_offset; /* starting frame offset in the xfer URB. */
+ /* Isoc frame that the current transfer buffer corresponds to. */
+ int isoc_frame_index;
int isoc_size; /* size of all isoc frames sent by this seg. */
enum wa_seg_status status;
ssize_t result; /* bytes xfered or error */
@@ -158,8 +159,6 @@ struct wa_xfer {
unsigned is_dma:1;
size_t seg_size;
int result;
- /* Isoc frame that the current transfer buffer corresponds to. */
- int dto_isoc_frame_index;
gfp_t gfp; /* allocation mask */
@@ -168,6 +167,8 @@ struct wa_xfer {
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
struct wa_seg *seg, int curr_iso_frame);
+static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
+ int starting_index, enum wa_seg_status status);
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
@@ -282,6 +283,7 @@ static void wa_xfer_giveback(struct wa_xfer *xfer)
spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
list_del_init(&xfer->list_node);
+ usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
/* FIXME: segmentation broken -- kills DWA */
wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
@@ -367,15 +369,15 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
break;
case WA_SEG_ERROR:
xfer->result = seg->result;
- dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08zX)\n",
+ dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
xfer, wa_xfer_id(xfer), seg->index, seg->result,
seg->result);
goto out;
case WA_SEG_ABORTED:
- dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n",
- xfer, wa_xfer_id(xfer), seg->index,
- urb->status);
- xfer->result = urb->status;
+ xfer->result = seg->result;
+ dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
+ xfer, wa_xfer_id(xfer), seg->index, seg->result,
+ seg->result);
goto out;
default:
dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
@@ -390,6 +392,24 @@ out:
}
/*
+ * Mark the given segment as done. Return true if this completes the xfer.
+ * This should only be called for segs that have been submitted to an RPIPE.
+ * Delayed segs are not marked as submitted so they do not need to be marked
+ * as done when cleaning up.
+ *
+ * xfer->lock has to be locked
+ */
+static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
+ struct wa_seg *seg, enum wa_seg_status status)
+{
+ seg->status = status;
+ xfer->segs_done++;
+
+ /* check for done. */
+ return __wa_xfer_is_done(xfer);
+}
+
+/*
* Search for a transfer list ID on the HCD's URB list
*
* For 32 bit architectures, we use the pointer itself; for 64 bits, a
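
The __wa_xfer_mark_seg_as_done() helper added above replaces the repeated status/segs_done/__wa_xfer_is_done sequence in the error paths below. Its calling convention, condensed from the wa_seg_dto_cb hunk later in this patch (the real callers also abort the xfer and update the rpipe):

	spin_lock_irqsave(&xfer->lock, flags);
	seg->result = urb->status;
	done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);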
@@ -416,12 +436,51 @@ out:
struct wa_xfer_abort_buffer {
struct urb urb;
+ struct wahc *wa;
struct wa_xfer_abort cmd;
};
static void __wa_xfer_abort_cb(struct urb *urb)
{
struct wa_xfer_abort_buffer *b = urb->context;
+ struct wahc *wa = b->wa;
+
+ /*
+ * If the abort request URB failed, then the HWA did not get the abort
+ * command. Forcibly clean up the xfer without waiting for a Transfer
+ * Result from the HWA.
+ */
+ if (urb->status < 0) {
+ struct wa_xfer *xfer;
+ struct device *dev = &wa->usb_iface->dev;
+
+ xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
+ dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
+ __func__, urb->status);
+ if (xfer) {
+ unsigned long flags;
+ int done;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
+ __func__, xfer, wa_xfer_id(xfer));
+ spin_lock_irqsave(&xfer->lock, flags);
+ /* mark all segs as aborted. */
+ wa_complete_remaining_xfer_segs(xfer, 0,
+ WA_SEG_ABORTED);
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
+ } else {
+ dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
+ __func__, le32_to_cpu(b->cmd.dwTransferID));
+ }
+ }
+
+ wa_put(wa); /* taken in __wa_xfer_abort */
usb_put_urb(&b->urb);
}
@@ -449,6 +508,7 @@ static int __wa_xfer_abort(struct wa_xfer *xfer)
b->cmd.bRequestType = WA_XFER_ABORT;
b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
+ b->wa = wa_get(xfer->wa);
usb_init_urb(&b->urb);
usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
@@ -462,6 +522,7 @@ static int __wa_xfer_abort(struct wa_xfer *xfer)
error_submit:
+ wa_put(xfer->wa);
if (printk_ratelimit())
dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
xfer, result);
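
The wa_get()/wa_put() pair added here keeps the wahc alive until the abort URB's completion callback runs. Condensed pairing from the two hunks above:

	b->wa = wa_get(xfer->wa);	/* before submitting b->urb */

	/* in __wa_xfer_abort_cb (and in the submit-failure path): */
	wa_put(wa);
	usb_put_urb(&b->urb);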
@@ -487,13 +548,14 @@ static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
&& ((segment_size + iso_frame_desc[index].length)
<= xfer->seg_size)) {
/*
- * For Alereon HWA devices, only include an isoc frame in a
- * segment if it is physically contiguous with the previous
+ * For Alereon HWA devices, only include an isoc frame in an
+ * out segment if it is physically contiguous with the previous
* frame. This is required because those devices expect
* the isoc frames to be sent as a single USB transaction as
* opposed to one transaction per frame with standard HWA.
*/
if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ && (xfer->is_inbound == 0)
&& (index > isoc_frame_offset)
&& ((iso_frame_desc[index - 1].offset +
iso_frame_desc[index - 1].length) !=
@@ -536,14 +598,8 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
result = sizeof(struct wa_xfer_bi);
break;
case USB_ENDPOINT_XFER_ISOC:
- if (usb_pipeout(urb->pipe)) {
- *pxfer_type = WA_XFER_TYPE_ISO;
- result = sizeof(struct wa_xfer_hwaiso);
- } else {
- dev_err(dev, "FIXME: ISOC IN not implemented\n");
- result = -ENOSYS;
- goto error;
- }
+ *pxfer_type = WA_XFER_TYPE_ISO;
+ result = sizeof(struct wa_xfer_hwaiso);
break;
default:
/* never happens */
@@ -554,10 +610,22 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
+ xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
+ * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
+ /* Compute the segment size and make sure it is a multiple of
+ * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
+ * a check (FIXME) */
+ if (xfer->seg_size < maxpktsize) {
+ dev_err(dev,
+ "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
+ xfer->seg_size, maxpktsize);
+ result = -EINVAL;
+ goto error;
+ }
+ xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
int index = 0;
- xfer->seg_size = maxpktsize;
xfer->segs = 0;
/*
* loop over urb->number_of_packets to determine how many
@@ -570,19 +638,6 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
++xfer->segs;
}
} else {
- xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
- * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
- /* Compute the segment size and make sure it is a multiple of
- * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
- * a check (FIXME) */
- if (xfer->seg_size < maxpktsize) {
- dev_err(dev,
- "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
- xfer->seg_size, maxpktsize);
- result = -EINVAL;
- goto error;
- }
- xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
xfer->seg_size);
if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
@@ -700,23 +755,23 @@ static void wa_seg_dto_cb(struct urb *urb)
if (usb_pipeisoc(xfer->urb->pipe)) {
/* Alereon HWA sends all isoc frames in a single transfer. */
if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
- xfer->dto_isoc_frame_index += seg->isoc_frame_count;
+ seg->isoc_frame_index += seg->isoc_frame_count;
else
- xfer->dto_isoc_frame_index += 1;
- if (xfer->dto_isoc_frame_index < seg->isoc_frame_count) {
+ seg->isoc_frame_index += 1;
+ if (seg->isoc_frame_index < seg->isoc_frame_count) {
data_send_done = 0;
holding_dto = 1; /* checked in error cases. */
/*
* if this is the last isoc frame of the segment, we
* can release DTO after sending this frame.
*/
- if ((xfer->dto_isoc_frame_index + 1) >=
+ if ((seg->isoc_frame_index + 1) >=
seg->isoc_frame_count)
release_dto = 1;
}
dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
- wa_xfer_id(xfer), seg->index,
- xfer->dto_isoc_frame_index, holding_dto, release_dto);
+ wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
+ holding_dto, release_dto);
}
spin_unlock_irqrestore(&xfer->lock, flags);
@@ -736,10 +791,11 @@ static void wa_seg_dto_cb(struct urb *urb)
* send the URB and release DTO if we no longer need it.
*/
__wa_populate_dto_urb_isoc(xfer, seg,
- seg->isoc_frame_offset +
- xfer->dto_isoc_frame_index);
+ seg->isoc_frame_offset + seg->isoc_frame_index);
/* resubmit the URB with the next isoc frame. */
+ /* take a ref on resubmit. */
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
if (result < 0) {
dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
@@ -767,9 +823,13 @@ static void wa_seg_dto_cb(struct urb *urb)
goto error_default;
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
return;
error_dto_submit:
+ /* taken on resubmit attempt. */
+ wa_xfer_put(xfer);
error_default:
spin_lock_irqsave(&xfer->lock, flags);
rpipe = xfer->ep->hcpriv;
@@ -779,12 +839,10 @@ error_default:
wa_reset_all(wa);
}
if (seg->status != WA_SEG_ERROR) {
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (holding_dto) {
@@ -795,7 +853,8 @@ error_default:
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
-
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -844,17 +903,16 @@ static void wa_seg_iso_pack_desc_cb(struct urb *urb)
wa_xfer_id(xfer), seg->index, urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
- dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
+ dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
wa_reset_all(wa);
}
if (seg->status != WA_SEG_ERROR) {
usb_unlink_urb(seg->dto_urb);
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -862,6 +920,8 @@ static void wa_seg_iso_pack_desc_cb(struct urb *urb)
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -926,18 +986,18 @@ static void wa_seg_tr_cb(struct urb *urb)
}
usb_unlink_urb(seg->isoc_pack_desc_urb);
usb_unlink_urb(seg->dto_urb);
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
__wa_xfer_abort(xfer);
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
}
/*
@@ -947,7 +1007,7 @@ static void wa_seg_tr_cb(struct urb *urb)
*/
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
const unsigned int bytes_transferred,
- const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
+ const unsigned int bytes_to_transfer, int *out_num_sgs)
{
struct scatterlist *out_sg;
unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
@@ -1101,14 +1161,13 @@ static int __wa_populate_dto_urb(struct wa_xfer *xfer,
*/
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
- int result, cnt, iso_frame_offset;
+ int result, cnt, isoc_frame_offset = 0;
size_t alloc_size = sizeof(*xfer->seg[0])
- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
struct usb_device *usb_dev = xfer->wa->usb_dev;
const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
struct wa_seg *seg;
size_t buf_itr, buf_size, buf_itr_size;
- int xfer_isoc_frame_offset = 0;
result = -ENOMEM;
xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
@@ -1116,15 +1175,18 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
goto error_segs_kzalloc;
buf_itr = 0;
buf_size = xfer->urb->transfer_buffer_length;
- iso_frame_offset = 0;
for (cnt = 0; cnt < xfer->segs; cnt++) {
size_t iso_pkt_descr_size = 0;
int seg_isoc_frame_count = 0, seg_isoc_size = 0;
+ /*
+ * Adjust the size of the segment object to contain space for
+ * the isoc packet descriptor buffer.
+ */
if (usb_pipeisoc(xfer->urb->pipe)) {
seg_isoc_frame_count =
__wa_seg_calculate_isoc_frame_count(xfer,
- xfer_isoc_frame_offset, &seg_isoc_size);
+ isoc_frame_offset, &seg_isoc_size);
iso_pkt_descr_size =
sizeof(struct wa_xfer_packet_info_hwaiso) +
@@ -1137,15 +1199,40 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
wa_seg_init(seg);
seg->xfer = xfer;
seg->index = cnt;
- seg->isoc_frame_count = seg_isoc_frame_count;
- seg->isoc_frame_offset = xfer_isoc_frame_offset;
- seg->isoc_size = seg_isoc_size;
usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
usb_sndbulkpipe(usb_dev,
dto_epd->bEndpointAddress),
&seg->xfer_hdr, xfer_hdr_size,
wa_seg_tr_cb, seg);
buf_itr_size = min(buf_size, xfer->seg_size);
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ seg->isoc_frame_count = seg_isoc_frame_count;
+ seg->isoc_frame_offset = isoc_frame_offset;
+ seg->isoc_size = seg_isoc_size;
+ /* iso packet descriptor. */
+ seg->isoc_pack_desc_urb =
+ usb_alloc_urb(0, GFP_ATOMIC);
+ if (seg->isoc_pack_desc_urb == NULL)
+ goto error_iso_pack_desc_alloc;
+ /*
+ * The buffer for the isoc packet descriptor starts
+ * after the transfer request header in the
+ * segment object memory buffer.
+ */
+ usb_fill_bulk_urb(
+ seg->isoc_pack_desc_urb, usb_dev,
+ usb_sndbulkpipe(usb_dev,
+ dto_epd->bEndpointAddress),
+ (void *)(&seg->xfer_hdr) +
+ xfer_hdr_size,
+ iso_pkt_descr_size,
+ wa_seg_iso_pack_desc_cb, seg);
+
+ /* adjust starting frame offset for next seg. */
+ isoc_frame_offset += seg_isoc_frame_count;
+ }
+
if (xfer->is_inbound == 0 && buf_size > 0) {
/* outbound data. */
seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -1158,25 +1245,6 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
NULL, 0, wa_seg_dto_cb, seg);
if (usb_pipeisoc(xfer->urb->pipe)) {
- /* iso packet descriptor. */
- seg->isoc_pack_desc_urb =
- usb_alloc_urb(0, GFP_ATOMIC);
- if (seg->isoc_pack_desc_urb == NULL)
- goto error_iso_pack_desc_alloc;
- /*
- * The buffer for the isoc packet descriptor
- * after the transfer request header in the
- * segment object memory buffer.
- */
- usb_fill_bulk_urb(
- seg->isoc_pack_desc_urb, usb_dev,
- usb_sndbulkpipe(usb_dev,
- dto_epd->bEndpointAddress),
- (void *)(&seg->xfer_hdr) +
- xfer_hdr_size,
- iso_pkt_descr_size,
- wa_seg_iso_pack_desc_cb, seg);
-
/*
* Fill in the xfer buffer information for the
* first isoc frame. Subsequent frames in this
@@ -1184,9 +1252,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
* DTO completion routine, if needed.
*/
__wa_populate_dto_urb_isoc(xfer, seg,
- xfer_isoc_frame_offset);
- /* adjust starting frame offset for next seg. */
- xfer_isoc_frame_offset += seg_isoc_frame_count;
+ seg->isoc_frame_offset);
} else {
/* fill in the xfer buffer information. */
result = __wa_populate_dto_urb(xfer, seg,
@@ -1207,10 +1273,11 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
* Use the fact that cnt is left at were it failed. The remaining
* segments will be cleaned up by wa_xfer_destroy.
*/
-error_iso_pack_desc_alloc:
error_seg_outbound_populate:
usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
+ usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
+error_iso_pack_desc_alloc:
kfree(xfer->seg[cnt]);
xfer->seg[cnt] = NULL;
error_seg_kmalloc:
@@ -1259,8 +1326,11 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
for (cnt = 1; cnt < xfer->segs; cnt++) {
struct wa_xfer_packet_info_hwaiso *packet_desc;
struct wa_seg *seg = xfer->seg[cnt];
+ struct wa_xfer_hwaiso *xfer_iso;
xfer_hdr = &seg->xfer_hdr;
+ xfer_iso = container_of(xfer_hdr,
+ struct wa_xfer_hwaiso, hdr);
packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
/*
* Copy values from the 0th header. Segment specific
@@ -1270,6 +1340,8 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
xfer_hdr->bTransferSegment = cnt;
xfer_hdr->dwTransferLength =
cpu_to_le32(seg->isoc_size);
+ xfer_iso->dwNumOfPackets =
+ cpu_to_le32(seg->isoc_frame_count);
__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
seg->status = WA_SEG_READY;
}
@@ -1311,43 +1383,52 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
/* default to done unless we encounter a multi-frame isoc segment. */
*dto_done = 1;
+ /*
+ * Take a ref for each segment urb so the xfer cannot disappear until
+ * all of the callbacks run.
+ */
+ wa_xfer_get(xfer);
/* submit the transfer request. */
+ seg->status = WA_SEG_SUBMITTED;
result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
if (result < 0) {
pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
__func__, xfer, seg->index, result);
- goto error_seg_submit;
+ wa_xfer_put(xfer);
+ goto error_tr_submit;
}
/* submit the isoc packet descriptor if present. */
if (seg->isoc_pack_desc_urb) {
- struct wahc *wa = xfer->wa;
-
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
+ seg->isoc_frame_index = 0;
if (result < 0) {
pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
__func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
goto error_iso_pack_desc_submit;
}
- xfer->dto_isoc_frame_index = 0;
- /*
- * If this segment contains more than one isoc frame, hold
- * onto the dto resource until we send all frames.
- * Only applies to non-Alereon devices.
- */
- if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
- && (seg->isoc_frame_count > 1))
- *dto_done = 0;
}
/* submit the out data if this is an out request. */
if (seg->dto_urb) {
+ struct wahc *wa = xfer->wa;
+ wa_xfer_get(xfer);
result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
if (result < 0) {
pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
__func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
goto error_dto_submit;
}
+ /*
+ * If this segment contains more than one isoc frame, hold
+ * onto the dto resource until we send all frames.
+ * Only applies to non-Alereon devices.
+ */
+ if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
+ && (seg->isoc_frame_count > 1))
+ *dto_done = 0;
}
- seg->status = WA_SEG_SUBMITTED;
rpipe_avail_dec(rpipe);
return 0;
@@ -1355,7 +1436,7 @@ error_dto_submit:
usb_unlink_urb(seg->isoc_pack_desc_urb);
error_iso_pack_desc_submit:
usb_unlink_urb(&seg->tr_urb);
-error_seg_submit:
+error_tr_submit:
seg->status = WA_SEG_ERROR;
seg->result = result;
*dto_done = 1;
@@ -1387,6 +1468,12 @@ static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
list_node);
list_del(&seg->list_node);
xfer = seg->xfer;
+ /*
+ * Get a reference to the xfer in case the callbacks for the
+ * URBs submitted by __wa_seg_submit attempt to complete
+ * the xfer before this function completes.
+ */
+ wa_xfer_get(xfer);
result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
/* release the dto resource if this RPIPE is done with it. */
if (dto_done)
@@ -1395,13 +1482,23 @@ static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
xfer, wa_xfer_id(xfer), seg->index,
atomic_read(&rpipe->segs_available), result);
if (unlikely(result < 0)) {
+ int done;
+
spin_unlock_irqrestore(&rpipe->seg_lock, flags);
spin_lock_irqsave(&xfer->lock, flags);
__wa_xfer_abort(xfer);
+ /*
+ * This seg was marked as submitted when it was put on
+ * the RPIPE seg_list. Mark it done.
+ */
xfer->segs_done++;
+ done = __wa_xfer_is_done(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
spin_lock_irqsave(&rpipe->seg_lock, flags);
}
+ wa_xfer_put(xfer);
}
/*
* Mark this RPIPE as waiting if dto was not acquired, there are
@@ -1567,7 +1664,8 @@ static int wa_urb_enqueue_b(struct wa_xfer *xfer)
wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
if (wusb_dev == NULL) {
mutex_unlock(&wusbhc->mutex);
- pr_err("%s: error wusb dev gone\n", __func__);
+ dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
+ __func__);
goto error_dev_gone;
}
mutex_unlock(&wusbhc->mutex);
@@ -1576,21 +1674,28 @@ static int wa_urb_enqueue_b(struct wa_xfer *xfer)
xfer->wusb_dev = wusb_dev;
result = urb->status;
if (urb->status != -EINPROGRESS) {
- pr_err("%s: error_dequeued\n", __func__);
+ dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
goto error_dequeued;
}
result = __wa_xfer_setup(xfer, urb);
if (result < 0) {
- pr_err("%s: error_xfer_setup\n", __func__);
+ dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
goto error_xfer_setup;
}
+ /*
+ * Get a xfer reference since __wa_xfer_submit starts asynchronous
+ * operations that may try to complete the xfer before this function
+ * exits.
+ */
+ wa_xfer_get(xfer);
result = __wa_xfer_submit(xfer);
if (result < 0) {
- pr_err("%s: error_xfer_submit\n", __func__);
+ dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
goto error_xfer_submit;
}
spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_put(xfer);
return 0;
/*
@@ -1616,6 +1721,7 @@ error_xfer_submit:
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
+ wa_xfer_put(xfer);
/* return success since the completion routine will run. */
return 0;
}
@@ -1730,6 +1836,12 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
dump_stack();
}
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+ if (result < 0)
+ goto error_link_urb;
+
result = -ENOMEM;
xfer = kzalloc(sizeof(*xfer), gfp);
if (xfer == NULL)
@@ -1769,6 +1881,9 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
__func__, result);
wa_put(xfer->wa);
wa_xfer_put(xfer);
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
return result;
}
}
@@ -1777,6 +1892,10 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
error_dequeued:
kfree(xfer);
error_kmalloc:
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+error_link_urb:
return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
@@ -1799,7 +1918,7 @@ EXPORT_SYMBOL_GPL(wa_urb_enqueue);
* asynch request] and then make sure we cancel each segment.
*
*/
-int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
+int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
{
unsigned long flags, flags2;
struct wa_xfer *xfer;
@@ -1807,24 +1926,43 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
struct wa_rpipe *rpipe;
unsigned cnt, done = 0, xfer_abort_pending;
unsigned rpipe_ready = 0;
+ int result;
- xfer = urb->hcpriv;
- if (xfer == NULL) {
+ /* check if it is safe to unlink. */
+ spin_lock_irqsave(&wa->xfer_list_lock, flags);
+ result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
+ if ((result == 0) && urb->hcpriv) {
/*
- * Nothing setup yet enqueue will see urb->status !=
- * -EINPROGRESS (by hcd layer) and bail out with
- * error, no need to do completion
+ * Get a xfer ref to prevent a race with wa_xfer_giveback
+ * cleaning up the xfer while we are working with it.
*/
- BUG_ON(urb->status == -EINPROGRESS);
- goto out;
+ wa_xfer_get(urb->hcpriv);
}
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+ if (result)
+ return result;
+
+ xfer = urb->hcpriv;
+ if (xfer == NULL)
+ return -ENOENT;
spin_lock_irqsave(&xfer->lock, flags);
pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
rpipe = xfer->ep->hcpriv;
if (rpipe == NULL) {
- pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
- __func__, wa_xfer_id(xfer),
+ pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s",
+ __func__, xfer, wa_xfer_id(xfer),
"Probably already aborted.\n" );
+ result = -ENOENT;
+ goto out_unlock;
+ }
+ /*
+ * Check for done to avoid racing with wa_xfer_giveback and completing
+ * twice.
+ */
+ if (__wa_xfer_is_done(xfer)) {
+ pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
+ xfer, wa_xfer_id(xfer));
+ result = -ENOENT;
goto out_unlock;
}
/* Check the delayed list -> if there, release and complete */
@@ -1836,6 +1974,11 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
goto out_unlock; /* setup(), enqueue_b() completes */
/* Ok, the xfer is in flight already, it's been setup and submitted.*/
xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
+ /*
+ * grab the rpipe->seg_lock here to prevent racing with
+ * __wa_xfer_delayed_run.
+ */
+ spin_lock(&rpipe->seg_lock);
for (cnt = 0; cnt < xfer->segs; cnt++) {
seg = xfer->seg[cnt];
pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
@@ -1855,16 +1998,25 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
* segments will be completed in the DTI interrupt.
*/
seg->status = WA_SEG_ABORTED;
- spin_lock_irqsave(&rpipe->seg_lock, flags2);
+ seg->result = -ENOENT;
list_del(&seg->list_node);
xfer->segs_done++;
- spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
break;
case WA_SEG_DONE:
case WA_SEG_ERROR:
case WA_SEG_ABORTED:
break;
/*
+ * The buf_in data for a segment in the
+ * WA_SEG_DTI_PENDING state is actively being read.
+ * Let wa_buf_in_cb handle it since it will be called
+ * and will increment xfer->segs_done. Cleaning up
+ * here could cause wa_buf_in_cb to access the xfer
+ * after it has been completed/freed.
+ */
+ case WA_SEG_DTI_PENDING:
+ break;
+ /*
* In the states below, the HWA device already knows
* about the transfer. If an abort request was sent,
* allow the HWA to process it and wait for the
@@ -1873,7 +2025,6 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
*/
case WA_SEG_SUBMITTED:
case WA_SEG_PENDING:
- case WA_SEG_DTI_PENDING:
/*
* Check if the abort was successfully sent. This could
* be false if the HWA has been removed but we haven't
@@ -1887,6 +2038,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
break;
}
}
+ spin_unlock(&rpipe->seg_lock);
xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
done = __wa_xfer_is_done(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
@@ -1894,12 +2046,13 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
- return 0;
+ wa_xfer_put(xfer);
+ return result;
out_unlock:
spin_unlock_irqrestore(&xfer->lock, flags);
-out:
- return 0;
+ wa_xfer_put(xfer);
+ return result;
dequeue_delayed:
list_del_init(&xfer->list_node);
@@ -1907,6 +2060,7 @@ dequeue_delayed:
xfer->result = urb->status;
spin_unlock_irqrestore(&xfer->lock, flags);
wa_xfer_giveback(xfer);
+ wa_xfer_put(xfer);
usb_put_urb(urb); /* we got a ref in enqueue() */
return 0;
}
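
The reworked dequeue entry follows the standard HCD unlink protocol: validate the unlink under the list lock, and take an xfer reference before dropping the lock so wa_xfer_giveback() cannot free the transfer while dequeue works on it. Condensed from the hunk above:

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	result = usb_hcd_check_unlink_urb(&wa->wusb->usb_hcd, urb, status);
	if (result == 0 && urb->hcpriv)
		wa_xfer_get(urb->hcpriv);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	if (result)
		return result;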
@@ -1935,7 +2089,7 @@ static int wa_xfer_status_to_errno(u8 status)
[WA_XFER_STATUS_NOT_FOUND] = 0,
[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
[WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
- [WA_XFER_STATUS_ABORTED] = -EINTR,
+ [WA_XFER_STATUS_ABORTED] = -ENOENT,
[WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
[WA_XFER_INVALID_FORMAT] = EINVAL,
[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
@@ -1966,15 +2120,17 @@ static int wa_xfer_status_to_errno(u8 status)
* no other segment transfer results will be returned from the device.
* Mark the remaining submitted or pending xfers as completed so that
* the xfer will complete cleanly.
+ *
+ * xfer->lock must be held
+ *
*/
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
- struct wa_seg *incoming_seg)
+ int starting_index, enum wa_seg_status status)
{
int index;
struct wa_rpipe *rpipe = xfer->ep->hcpriv;
- for (index = incoming_seg->index + 1; index < xfer->segs_submitted;
- index++) {
+ for (index = starting_index; index < xfer->segs_submitted; index++) {
struct wa_seg *current_seg = xfer->seg[index];
BUG_ON(current_seg == NULL);
@@ -1990,7 +2146,7 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
*/
case WA_SEG_DELAYED:
xfer->segs_done++;
- current_seg->status = incoming_seg->status;
+ current_seg->status = status;
break;
case WA_SEG_ABORTED:
break;
@@ -2003,6 +2159,114 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
}
}
+/* Populate the given urb based on the current isoc transfer state. */
+static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
+ struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
+{
+ int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
+ int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
+ int next_frame_contiguous;
+ struct usb_iso_packet_descriptor *iso_frame;
+
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
+
+ /*
+ * If the current frame actual_length is contiguous with the next frame
+ * and actual_length is a multiple of the DTI endpoint max packet size,
+ * combine the current frame with the next frame in a single URB. This
+ * reduces the number of URBs that must be submitted in that case.
+ */
+ seg_index = seg->isoc_frame_index;
+ do {
+ next_frame_contiguous = 0;
+
+ iso_frame = &iso_frame_desc[urb_frame_index];
+ total_len += iso_frame->actual_length;
+ ++urb_frame_index;
+ ++seg_index;
+
+ if (seg_index < seg->isoc_frame_count) {
+ struct usb_iso_packet_descriptor *next_iso_frame;
+
+ next_iso_frame = &iso_frame_desc[urb_frame_index];
+
+ if ((iso_frame->offset + iso_frame->actual_length) ==
+ next_iso_frame->offset)
+ next_frame_contiguous = 1;
+ }
+ } while (next_frame_contiguous
+ && ((iso_frame->actual_length % dti_packet_size) == 0));
+
+ /* this should always be 0 before a resubmit. */
+ buf_in_urb->num_mapped_sgs = 0;
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
+ iso_frame_desc[urb_start_frame].offset;
+ buf_in_urb->transfer_buffer_length = total_len;
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ buf_in_urb->context = seg;
+
+ /* return the number of frames included in this URB. */
+ return seg_index - seg->isoc_frame_index;
+}
+
+/* Populate the given urb based on the current transfer state. */
+static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
+ unsigned int seg_idx, unsigned int bytes_transferred)
+{
+ int result = 0;
+ struct wa_seg *seg = xfer->seg[seg_idx];
+
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
+ /* this should always be 0 before a resubmit. */
+ buf_in_urb->num_mapped_sgs = 0;
+
+ if (xfer->is_dma) {
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+ + (seg_idx * xfer->seg_size);
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ } else {
+ /* do buffer or SG processing. */
+ buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
+
+ if (xfer->urb->transfer_buffer) {
+ buf_in_urb->transfer_buffer =
+ xfer->urb->transfer_buffer
+ + (seg_idx * xfer->seg_size);
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ } else {
+			/*
+			 * allocate an SG list to store seg_size bytes and
+			 * copy the subset of the xfer->urb->sg that matches
+			 * the buffer subset we are about to read.
+			 */
+ buf_in_urb->sg = wa_xfer_create_subset_sg(
+ xfer->urb->sg,
+ seg_idx * xfer->seg_size,
+ bytes_transferred,
+ &(buf_in_urb->num_sgs));
+
+ if (!(buf_in_urb->sg)) {
+ buf_in_urb->num_sgs = 0;
+ result = -ENOMEM;
+ }
+ buf_in_urb->transfer_buffer = NULL;
+ }
+ }
+ buf_in_urb->transfer_buffer_length = bytes_transferred;
+ buf_in_urb->context = seg;
+
+ return result;
+}
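+/*
+ * Editorial note: when the SG path above succeeds, ownership of the
+ * allocated buf_in_urb->sg list passes to the URB; wa_buf_in_cb() frees
+ * it on completion and the error path in wa_xfer_result_chew() frees it
+ * if the submit fails.  On -ENOMEM the caller must bail out without
+ * submitting the URB.
+ */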
+
/*
* Process a xfer result completion message
*
@@ -2016,12 +2280,14 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
int result;
struct device *dev = &wa->usb_iface->dev;
unsigned long flags;
- u8 seg_idx;
+ unsigned int seg_idx;
struct wa_seg *seg;
struct wa_rpipe *rpipe;
unsigned done = 0;
u8 usb_status;
unsigned rpipe_ready = 0;
+ unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
+ struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
spin_lock_irqsave(&xfer->lock, flags);
seg_idx = xfer_result->bTransferSegment & 0x7f;
@@ -2045,7 +2311,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
}
if (usb_status & 0x80) {
seg->result = wa_xfer_status_to_errno(usb_status);
- dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
+		dev_err(dev, "DTI: xfer %p 0x%08X#%u failed (0x%02x)\n",
xfer, xfer->id, seg->index, usb_status);
seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
WA_SEG_ABORTED : WA_SEG_ERROR;
@@ -2054,69 +2320,39 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
/* FIXME: we ignore warnings, tally them for stats */
if (usb_status & 0x40) /* Warning?... */
usb_status = 0; /* ... pass */
- if (usb_pipeisoc(xfer->urb->pipe)) {
+ /*
+ * If the last segment bit is set, complete the remaining segments.
+ * When the current segment is completed, either in wa_buf_in_cb for
+ * transfers with data or below for no data, the xfer will complete.
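+	 * (Editorial note: bit 7 of bTransferSegment flags the last segment
+	 * of the transfer; the same byte masked with 0x7f yields the segment
+	 * index read earlier.)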
+ */
+ if (xfer_result->bTransferSegment & 0x80)
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
+ WA_SEG_DONE);
+ if (usb_pipeisoc(xfer->urb->pipe)
+ && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
/* set up WA state to read the isoc packet status next. */
wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
wa->dti_isoc_xfer_seg = seg_idx;
wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
- } else if (xfer->is_inbound) { /* IN data phase: read to buffer */
+ } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
+ && (bytes_transferred > 0)) {
+ /* IN data phase: read to buffer */
seg->status = WA_SEG_DTI_PENDING;
- BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
- /* this should always be 0 before a resubmit. */
- wa->buf_in_urb->num_mapped_sgs = 0;
-
- if (xfer->is_dma) {
- wa->buf_in_urb->transfer_dma =
- xfer->urb->transfer_dma
- + (seg_idx * xfer->seg_size);
- wa->buf_in_urb->transfer_flags
- |= URB_NO_TRANSFER_DMA_MAP;
- wa->buf_in_urb->transfer_buffer = NULL;
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
- } else {
- /* do buffer or SG processing. */
- wa->buf_in_urb->transfer_flags
- &= ~URB_NO_TRANSFER_DMA_MAP;
-
- if (xfer->urb->transfer_buffer) {
- wa->buf_in_urb->transfer_buffer =
- xfer->urb->transfer_buffer
- + (seg_idx * xfer->seg_size);
- wa->buf_in_urb->sg = NULL;
- wa->buf_in_urb->num_sgs = 0;
- } else {
- /* allocate an SG list to store seg_size bytes
- and copy the subset of the xfer->urb->sg
- that matches the buffer subset we are
- about to read. */
- wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
- xfer->urb->sg,
- seg_idx * xfer->seg_size,
- le32_to_cpu(
- xfer_result->dwTransferLength),
- &(wa->buf_in_urb->num_sgs));
-
- if (!(wa->buf_in_urb->sg)) {
- wa->buf_in_urb->num_sgs = 0;
- goto error_sg_alloc;
- }
- wa->buf_in_urb->transfer_buffer = NULL;
- }
- }
- wa->buf_in_urb->transfer_buffer_length =
- le32_to_cpu(xfer_result->dwTransferLength);
- wa->buf_in_urb->context = seg;
- result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+ result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
+ bytes_transferred);
if (result < 0)
+ goto error_buf_in_populate;
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
goto error_submit_buf_in;
+ }
} else {
- /* OUT data phase, complete it -- */
- seg->status = WA_SEG_DONE;
- seg->result = le32_to_cpu(xfer_result->dwTransferLength);
- xfer->segs_done++;
+ /* OUT data phase or no data, complete it -- */
+ seg->result = bytes_transferred;
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
@@ -2135,15 +2371,15 @@ error_submit_buf_in:
dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
xfer, seg_idx, result);
seg->result = result;
- kfree(wa->buf_in_urb->sg);
- wa->buf_in_urb->sg = NULL;
-error_sg_alloc:
+ kfree(buf_in_urb->sg);
+ buf_in_urb->sg = NULL;
+error_buf_in_populate:
__wa_xfer_abort(xfer);
seg->status = WA_SEG_ERROR;
error_complete:
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- wa_complete_remaining_xfer_segs(xfer, seg);
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
done = __wa_xfer_is_done(xfer);
/*
* queue work item to clear STALL for control endpoints.
@@ -2154,10 +2390,10 @@ error_complete:
done) {
dev_info(dev, "Control EP stall. Queue delayed work.\n");
- spin_lock_irq(&wa->xfer_list_lock);
+ spin_lock(&wa->xfer_list_lock);
/* move xfer from xfer_list to xfer_errored_list. */
list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
- spin_unlock_irq(&wa->xfer_list_lock);
+ spin_unlock(&wa->xfer_list_lock);
spin_unlock_irqrestore(&xfer->lock, flags);
queue_work(wusbd, &wa->xfer_error_work);
} else {
@@ -2172,7 +2408,7 @@ error_complete:
error_bad_seg:
spin_unlock_irqrestore(&xfer->lock, flags);
- wa_urb_dequeue(wa, xfer->urb);
+ wa_urb_dequeue(wa, xfer->urb, -ENOENT);
if (printk_ratelimit())
dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
@@ -2192,7 +2428,7 @@ segment_aborted:
*
* inbound transfers: need to schedule a buf_in_urb read
*/
-static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
+static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
{
struct device *dev = &wa->usb_iface->dev;
struct wa_xfer_packet_status_hwaiso *packet_status;
@@ -2201,8 +2437,8 @@ static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
unsigned long flags;
struct wa_seg *seg;
struct wa_rpipe *rpipe;
- unsigned done = 0;
- unsigned rpipe_ready = 0, seg_index;
+ unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
+ unsigned first_frame_index = 0, rpipe_ready = 0;
int expected_size;
/* We have a xfer result buffer; check it */
@@ -2238,37 +2474,101 @@ static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
le16_to_cpu(packet_status->wLength));
goto error_bad_seg;
}
- /* isoc packet status and lengths back xfer urb. */
+ /* write isoc packet status and lengths back to the xfer urb. */
status_array = packet_status->PacketStatus;
+ xfer->urb->start_frame =
+ wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
- xfer->urb->iso_frame_desc[seg->index].status =
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int xfer_frame_index =
+ seg->isoc_frame_offset + seg_index;
+
+ iso_frame_desc[xfer_frame_index].status =
wa_xfer_status_to_errno(
le16_to_cpu(status_array[seg_index].PacketStatus));
- xfer->urb->iso_frame_desc[seg->index].actual_length =
+ iso_frame_desc[xfer_frame_index].actual_length =
le16_to_cpu(status_array[seg_index].PacketLength);
+ /* track the number of frames successfully transferred. */
+ if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
+ /* save the starting frame index for buf_in_urb. */
+ if (!data_frame_count)
+ first_frame_index = seg_index;
+ ++data_frame_count;
+ }
}
- if (!xfer->is_inbound) {
- /* OUT transfer, complete it -- */
- seg->status = WA_SEG_DONE;
- xfer->segs_done++;
+ if (xfer->is_inbound && data_frame_count) {
+ int result, total_frames_read = 0, urb_index = 0;
+ struct urb *buf_in_urb;
+
+ /* IN data phase: read to buffer */
+ seg->status = WA_SEG_DTI_PENDING;
+
+ /* start with the first frame with data. */
+ seg->isoc_frame_index = first_frame_index;
+ /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
+ do {
+ int urb_frame_index, urb_frame_count;
+ struct usb_iso_packet_descriptor *iso_frame_desc;
+
+ buf_in_urb = &(wa->buf_in_urbs[urb_index]);
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
+ buf_in_urb, xfer, seg);
+ /* advance frame index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ total_frames_read += urb_frame_count;
+
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+
+ /* skip 0-byte frames. */
+ urb_frame_index =
+ seg->isoc_frame_offset + seg->isoc_frame_index;
+ iso_frame_desc =
+ &(xfer->urb->iso_frame_desc[urb_frame_index]);
+ while ((seg->isoc_frame_index <
+ seg->isoc_frame_count) &&
+ (iso_frame_desc->actual_length == 0)) {
+ ++(seg->isoc_frame_index);
+ ++iso_frame_desc;
+ }
+ ++urb_index;
+
+ } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
+ && (seg->isoc_frame_index <
+ seg->isoc_frame_count));
+
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
+			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ } else if (data_frame_count > total_frames_read)
+ /* If we need to read more frames, set DTI busy. */
+ dti_busy = 1;
+ } else {
+ /* OUT transfer or no more IN data, complete it -- */
rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
}
spin_unlock_irqrestore(&xfer->lock, flags);
- wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+ if (dti_busy)
+ wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
+ else
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
wa_xfer_put(xfer);
- return;
+ return dti_busy;
error_bad_seg:
spin_unlock_irqrestore(&xfer->lock, flags);
wa_xfer_put(xfer);
error_parse_buffer:
- return;
+ return dti_busy;
}
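+/*
+ * Editorial summary of the DTI state machine implied above:
+ *
+ *   WA_DTI_TRANSFER_RESULT_PENDING
+ *     -> WA_DTI_ISOC_PACKET_STATUS_PENDING when a transfer result for
+ *        an isoc segment with a non-zero packet count arrives
+ *        (wa_xfer_result_chew);
+ *   WA_DTI_ISOC_PACKET_STATUS_PENDING
+ *     -> WA_DTI_BUF_IN_DATA_PENDING when in-frames remain to be read
+ *        (dti_busy), otherwise back to WA_DTI_TRANSFER_RESULT_PENDING;
+ *   WA_DTI_BUF_IN_DATA_PENDING
+ *     -> WA_DTI_TRANSFER_RESULT_PENDING once wa_buf_in_cb() submits the
+ *        last buf_in read and resubmits the DTI URB.
+ */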
/*
@@ -2288,27 +2588,86 @@ static void wa_buf_in_cb(struct urb *urb)
struct wahc *wa;
struct device *dev;
struct wa_rpipe *rpipe;
- unsigned rpipe_ready;
+ unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
unsigned long flags;
+ int resubmit_dti = 0, active_buf_in_urbs;
u8 done = 0;
/* free the sg if it was used. */
kfree(urb->sg);
urb->sg = NULL;
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ --(wa->active_buf_in_urbs);
+ active_buf_in_urbs = wa->active_buf_in_urbs;
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ int seg_index;
+
+ /*
+ * Find the next isoc frame with data and count how many
+ * frames with data remain.
+ */
+ seg_index = seg->isoc_frame_index;
+ while (seg_index < seg->isoc_frame_count) {
+ const int urb_frame_index =
+ seg->isoc_frame_offset + seg_index;
+
+ if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+ /* save the index of the next frame with data */
+ if (!isoc_data_frame_count)
+ seg->isoc_frame_index = seg_index;
+ ++isoc_data_frame_count;
+ }
+ ++seg_index;
+ }
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
switch (urb->status) {
case 0:
spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
- rpipe = xfer->ep->hcpriv;
- dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
- xfer, seg->index, (size_t)urb->actual_length);
- seg->status = WA_SEG_DONE;
- seg->result = urb->actual_length;
- xfer->segs_done++;
- rpipe_ready = rpipe_avail_inc(rpipe);
- done = __wa_xfer_is_done(xfer);
+
+ seg->result += urb->actual_length;
+ if (isoc_data_frame_count > 0) {
+ int result, urb_frame_count;
+
+ /* submit a read URB for the next frame with data. */
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
+ xfer, seg);
+ /* advance index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
+				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
+ /*
+ * If we are in this callback and
+ * isoc_data_frame_count > 0, it means that the dti_urb
+ * submission was delayed in wa_dti_cb. Once
+ * we submit the last buf_in_urb, we can submit the
+ * delayed dti_urb.
+ */
+ resubmit_dti = (isoc_data_frame_count ==
+ urb_frame_count);
+ } else if (active_buf_in_urbs == 0) {
+ rpipe = xfer->ep->hcpriv;
+ dev_dbg(dev,
+ "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ seg->result);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_DONE);
+ }
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
@@ -2319,31 +2678,50 @@ static void wa_buf_in_cb(struct urb *urb)
	case -ENOENT:		/* as it was done by the one who unlinked us */
break;
default: /* Other errors ... */
+ /*
+ * Error on data buf read. Only resubmit DTI if it hasn't
+ * already been done by previously hitting this error or by a
+ * successful completion of the previous buf_in_urb.
+ */
+ resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
spin_lock_irqsave(&xfer->lock, flags);
- wa = xfer->wa;
- dev = &wa->usb_iface->dev;
rpipe = xfer->ep->hcpriv;
if (printk_ratelimit())
- dev_err(dev, "xfer %p#%u: data in error %d\n",
- xfer, seg->index, urb->status);
+ dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ urb->status);
if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
EDC_ERROR_TIMEFRAME)){
dev_err(dev, "DTO: URB max acceptable errors "
"exceeded, resetting device\n");
wa_reset_all(wa);
}
- seg->status = WA_SEG_ERROR;
seg->result = urb->status;
- xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- __wa_xfer_abort(xfer);
- done = __wa_xfer_is_done(xfer);
+ if (active_buf_in_urbs == 0)
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
+ else
+ __wa_xfer_abort(xfer);
spin_unlock_irqrestore(&xfer->lock, flags);
if (done)
wa_xfer_completion(xfer);
if (rpipe_ready)
wa_xfer_delayed_run(rpipe);
}
+
+ if (resubmit_dti) {
+ int result;
+
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
+ }
}
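+/*
+ * Editorial note on the accounting above: wa_buf_in_cb() decrements
+ * active_buf_in_urbs on entry and samples it under xfer->lock, so a
+ * segment is only marked done (or errored) once every outstanding
+ * buf_in read for it has completed; until then an error merely aborts
+ * the xfer and lets the remaining completions drain.
+ */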
/*
@@ -2374,7 +2752,7 @@ static void wa_buf_in_cb(struct urb *urb)
*/
static void wa_dti_cb(struct urb *urb)
{
- int result;
+ int result, dti_busy = 0;
struct wahc *wa = urb->context;
struct device *dev = &wa->usb_iface->dev;
u32 xfer_id;
@@ -2407,11 +2785,15 @@ static void wa_dti_cb(struct urb *urb)
xfer_result->hdr.bNotifyType);
break;
}
+ xfer_id = le32_to_cpu(xfer_result->dwTransferID);
usb_status = xfer_result->bTransferStatus & 0x3f;
- if (usb_status == WA_XFER_STATUS_NOT_FOUND)
+ if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
/* taken care of already */
+ dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
+ __func__, xfer_id,
+ xfer_result->bTransferSegment & 0x7f);
break;
- xfer_id = le32_to_cpu(xfer_result->dwTransferID);
+ }
xfer = wa_xfer_get_by_id(wa, xfer_id);
if (xfer == NULL) {
/* FIXME: transaction not found. */
@@ -2422,7 +2804,7 @@ static void wa_dti_cb(struct urb *urb)
wa_xfer_result_chew(wa, xfer, xfer_result);
wa_xfer_put(xfer);
} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
- wa_process_iso_packet_status(wa, urb);
+ dti_busy = wa_process_iso_packet_status(wa, urb);
} else {
dev_err(dev, "DTI Error: unexpected EP state = %d\n",
wa->dti_state);
@@ -2445,18 +2827,69 @@ static void wa_dti_cb(struct urb *urb)
dev_err(dev, "DTI: URB error %d\n", urb->status);
break;
}
- /* Resubmit the DTI URB */
- result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
- "resetting\n", result);
- wa_reset_all(wa);
+
+ /* Resubmit the DTI URB if we are not busy processing isoc in frames. */
+ if (!dti_busy) {
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
}
out:
return;
}
/*
+ * Initialize the DTI URB for reading transfer result notifications and also
 * the buffer-in URBs, for reading buffers. Then we just submit the DTI URB.
+ */
+int wa_dti_start(struct wahc *wa)
+{
+ const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
+ struct device *dev = &wa->usb_iface->dev;
+ int result = -ENOMEM, index;
+
+ if (wa->dti_urb != NULL) /* DTI URB already started */
+ goto out;
+
+ wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (wa->dti_urb == NULL) {
+ dev_err(dev, "Can't allocate DTI URB\n");
+ goto error_dti_urb_alloc;
+ }
+ usb_fill_bulk_urb(
+ wa->dti_urb, wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
+ wa->dti_buf, wa->dti_buf_size,
+ wa_dti_cb, wa);
+
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
+ usb_fill_bulk_urb(
+ &(wa->buf_in_urbs[index]), wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev,
+ 0x80 | dti_epd->bEndpointAddress),
+ NULL, 0, wa_buf_in_cb, wa);
+ }
+ result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
+ if (result < 0) {
+		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), resetting\n",
+ result);
+ goto error_dti_urb_submit;
+ }
+out:
+ return 0;
+
+error_dti_urb_submit:
+ usb_put_urb(wa->dti_urb);
+ wa->dti_urb = NULL;
+error_dti_urb_alloc:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wa_dti_start);
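+/*
+ * Hypothetical caller sketch (editorial, not from this patch):
+ * wa_dti_start() is idempotent -- it returns 0 immediately once
+ * wa->dti_urb exists -- so callers outside the notification path can
+ * start DTI processing opportunistically:
+ *
+ *	if (wa_dti_start(wa) < 0)
+ *		wa_reset_all(wa);
+ */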
+/*
* Transfer complete notification
*
* Called from the notif.c code. We get a notification on EP2 saying
@@ -2470,15 +2903,10 @@ out:
* Follow up in wa_dti_cb(), as that's where the whole state
* machine starts.
*
- * So here we just initialize the DTI URB for reading transfer result
- * notifications and also the buffer-in URB, for reading buffers. Then
- * we just submit the DTI URB.
- *
* @wa shall be referenced
*/
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
- int result;
struct device *dev = &wa->usb_iface->dev;
struct wa_notif_xfer *notif_xfer;
const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
@@ -2492,45 +2920,13 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
goto error;
}
- if (wa->dti_urb != NULL) /* DTI URB already started */
- goto out;
- wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->dti_urb == NULL) {
- dev_err(dev, "Can't allocate DTI URB\n");
- goto error_dti_urb_alloc;
- }
- usb_fill_bulk_urb(
- wa->dti_urb, wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
- wa->dti_buf, wa->dti_buf_size,
- wa_dti_cb, wa);
+ /* attempt to start the DTI ep processing. */
+ if (wa_dti_start(wa) < 0)
+ goto error;
- wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (wa->buf_in_urb == NULL) {
- dev_err(dev, "Can't allocate BUF-IN URB\n");
- goto error_buf_in_urb_alloc;
- }
- usb_fill_bulk_urb(
- wa->buf_in_urb, wa->usb_dev,
- usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
- NULL, 0, wa_buf_in_cb, wa);
- result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
- if (result < 0) {
- dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
- "resetting\n", result);
- goto error_dti_urb_submit;
- }
-out:
return;
-error_dti_urb_submit:
- usb_put_urb(wa->buf_in_urb);
- wa->buf_in_urb = NULL;
-error_buf_in_urb_alloc:
- usb_put_urb(wa->dti_urb);
- wa->dti_urb = NULL;
-error_dti_urb_alloc:
error:
wa_reset_all(wa);
}
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 742c607d1fa..3e1ba51d1a4 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -55,7 +55,8 @@ static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
* value of trust_timeout is jiffies.
*/
static ssize_t wusb_trust_timeout_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
@@ -173,7 +174,8 @@ static ssize_t wusb_phy_rate_store(struct device *dev,
wusbhc->phy_rate = phy_rate;
return size;
}
-static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, wusb_phy_rate_store);
+static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show,
+ wusb_phy_rate_store);
static ssize_t wusb_dnts_show(struct device *dev,
struct device_attribute *attr,
@@ -227,7 +229,8 @@ static ssize_t wusb_retry_count_store(struct device *dev,
if (result != 1)
return -EINVAL;
- wusbhc->retry_count = max_t(uint8_t, retry_count, WUSB_RETRY_COUNT_MAX);
+	wusbhc->retry_count = min_t(uint8_t, retry_count,
+		WUSB_RETRY_COUNT_MAX);
return size;
}
@@ -321,7 +324,8 @@ int wusbhc_b_create(struct wusbhc *wusbhc)
result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
if (result < 0) {
- dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result);
+ dev_err(dev, "Cannot register WUSBHC attributes: %d\n",
+ result);
goto error_create_attr_group;
}
@@ -419,13 +423,14 @@ EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
* - After a successful transfer, update the trust timeout timestamp
* for the WUSB device.
*
- * - [WUSB] sections 4.13 and 7.5.1 specifies the stop retrasmittion
+ * - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission
* condition for the WCONNECTACK_IE is that the host has observed
* the associated device responding to a control transfer.
*/
void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
{
- struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
+ struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc,
+ urb->dev);
if (status == 0 && wusb_dev) {
wusb_dev->entry_ts = jiffies;
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 6bd3b819a6b..2384add4537 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -164,7 +164,7 @@ struct wusb_port {
* functions/operations that only deal with general Wireless USB HC
* issues use this data type to refer to the host.
*
- * @usb_hcd Instantiation of a USB host controller
+ * @usb_hcd Instantiation of a USB host controller
 *                  (initialized by upper layer [HWA-HC or WHCI]).
*
* @dev Device that implements this; initialized by the
@@ -196,7 +196,7 @@ struct wusb_port {
* @ports_max Number of simultaneous device connections (fake
* ports) this HC will take. Read-only.
*
- * @port Array of port status for each fake root port. Guaranteed to
+ * @port Array of port status for each fake root port. Guaranteed to
* always be the same length during device existence
* [this allows for some unlocked but referenced reading].
*
@@ -329,7 +329,8 @@ void wusbhc_pal_unregister(struct wusbhc *wusbhc);
* This is a safe assumption as @usb_dev->bus is referenced all the
* time during the @usb_dev life cycle.
*/
-static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
+static inline
+struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
{
struct usb_hcd *usb_hcd;
usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);