Diffstat (limited to 'drivers/usb/wusbcore/wa-xfer.c')
 drivers/usb/wusbcore/wa-xfer.c | 1927 ++++++++++++++++++++++++++++++++----------
 1 file changed, 1507 insertions(+), 420 deletions(-)
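
A central piece of this change is arbitration of the single DTO (data transfer out) endpoint among RPIPEs: a segment may only be sent after winning a one-bit try-lock (__wa_dto_try_get()/__wa_dto_put() below), and RPIPEs that lose are parked on wa->rpipe_delayed_list until the holder releases the resource. The following is a minimal standalone C11 analogue of that try-lock pattern, not the kernel code itself; the patch uses test_and_set_bit()/clear_bit_unlock() on wa->dto_in_use, and all names in this sketch are illustrative.

	/*
	 * Standalone C11 sketch of the DTO try-lock pattern added by this
	 * patch.  atomic_flag is the closest portable analogue of the
	 * kernel's test_and_set_bit()/clear_bit_unlock() pair.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag dto_in_use = ATOMIC_FLAG_INIT;

	/* Returns nonzero if the caller now owns the DTO resource. */
	static int dto_try_get(void)
	{
		/* test-and-set returns the previous value: 0 means we won. */
		return !atomic_flag_test_and_set_explicit(&dto_in_use,
							  memory_order_acquire);
	}

	/* Release the DTO resource; release pairs with the acquire above. */
	static void dto_put(void)
	{
		atomic_flag_clear_explicit(&dto_in_use, memory_order_release);
	}

	int main(void)
	{
		if (dto_try_get()) {
			printf("acquired DTO, submit data-out URB\n");
			dto_put();
		} else {
			printf("DTO busy, park RPIPE on the delayed list\n");
		}
		return 0;
	}

In the patch, the failing path of this pattern corresponds to wa_add_delayed_rpipe(), and the holder's release is followed by wa_check_for_delayed_rpipes() so parked RPIPEs get another chance to submit.
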
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 6ad02f57c36..3e2e4ed2015 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -79,7 +79,6 @@   *     availability of the different required components (blocks,   *     rpipes, segment slots, etc), we go scheduling them. Painful.   */ -#include <linux/init.h>  #include <linux/spinlock.h>  #include <linux/slab.h>  #include <linux/hash.h> @@ -91,7 +90,8 @@  #include "wusbhc.h"  enum { -	WA_SEGS_MAX = 255, +	/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */ +	WA_SEGS_MAX = 128,  };  enum wa_seg_status { @@ -107,6 +107,7 @@ enum wa_seg_status {  };  static void wa_xfer_delayed_run(struct wa_rpipe *); +static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);  /*   * Life cycle governed by 'struct urb' (the refcount of the struct is @@ -114,24 +115,29 @@ static void wa_xfer_delayed_run(struct wa_rpipe *);   * struct).   */  struct wa_seg { -	struct urb urb; -	struct urb *dto_urb;		/* for data output? */ +	struct urb tr_urb;		/* transfer request urb. */ +	struct urb *isoc_pack_desc_urb;	/* for isoc packet descriptor. */ +	struct urb *dto_urb;		/* for data output. */  	struct list_head list_node;	/* for rpipe->req_list */  	struct wa_xfer *xfer;		/* out xfer */  	u8 index;			/* which segment we are */ +	int isoc_frame_count;	/* number of isoc frames in this segment. */ +	int isoc_frame_offset;	/* starting frame offset in the xfer URB. */ +	/* Isoc frame that the current transfer buffer corresponds to. */ +	int isoc_frame_index; +	int isoc_size;	/* size of all isoc frames sent by this seg. */  	enum wa_seg_status status;  	ssize_t result;			/* bytes xfered or error */  	struct wa_xfer_hdr xfer_hdr; -	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */  };  static inline void wa_seg_init(struct wa_seg *seg)  { -	usb_init_urb(&seg->urb); +	usb_init_urb(&seg->tr_urb);  	/* set the remaining memory to 0. */ -	memset(((void *)seg) + sizeof(seg->urb), 0, -		sizeof(*seg) - sizeof(seg->urb)); +	memset(((void *)seg) + sizeof(seg->tr_urb), 0, +		sizeof(*seg) - sizeof(seg->tr_urb));  }  /* @@ -159,6 +165,11 @@ struct wa_xfer {  	struct wusb_dev *wusb_dev;	/* for activity timestamps */  }; +static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer, +	struct wa_seg *seg, int curr_iso_frame); +static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, +		int starting_index, enum wa_seg_status status); +  static inline void wa_xfer_init(struct wa_xfer *xfer)  {  	kref_init(&xfer->refcnt); @@ -169,7 +180,7 @@ static inline void wa_xfer_init(struct wa_xfer *xfer)  /*   * Destroy a transfer structure   * - * Note that freeing xfer->seg[cnt]->urb will free the containing + * Note that freeing xfer->seg[cnt]->tr_urb will free the containing   * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.   
*/  static void wa_xfer_destroy(struct kref *_xfer) @@ -178,9 +189,17 @@ static void wa_xfer_destroy(struct kref *_xfer)  	if (xfer->seg) {  		unsigned cnt;  		for (cnt = 0; cnt < xfer->segs; cnt++) { -			usb_free_urb(xfer->seg[cnt]->dto_urb); -			usb_free_urb(&xfer->seg[cnt]->urb); +			struct wa_seg *seg = xfer->seg[cnt]; +			if (seg) { +				usb_free_urb(seg->isoc_pack_desc_urb); +				if (seg->dto_urb) { +					kfree(seg->dto_urb->sg); +					usb_free_urb(seg->dto_urb); +				} +				usb_free_urb(&seg->tr_urb); +			}  		} +		kfree(xfer->seg);  	}  	kfree(xfer);  } @@ -196,6 +215,59 @@ static void wa_xfer_put(struct wa_xfer *xfer)  }  /* + * Try to get exclusive access to the DTO endpoint resource.  Return true + * if successful. + */ +static inline int __wa_dto_try_get(struct wahc *wa) +{ +	return (test_and_set_bit(0, &wa->dto_in_use) == 0); +} + +/* Release the DTO endpoint resource. */ +static inline void __wa_dto_put(struct wahc *wa) +{ +	clear_bit_unlock(0, &wa->dto_in_use); +} + +/* Service RPIPEs that are waiting on the DTO resource. */ +static void wa_check_for_delayed_rpipes(struct wahc *wa) +{ +	unsigned long flags; +	int dto_waiting = 0; +	struct wa_rpipe *rpipe; + +	spin_lock_irqsave(&wa->rpipe_lock, flags); +	while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) { +		rpipe = list_first_entry(&wa->rpipe_delayed_list, +				struct wa_rpipe, list_node); +		__wa_xfer_delayed_run(rpipe, &dto_waiting); +		/* remove this RPIPE from the list if it is not waiting. */ +		if (!dto_waiting) { +			pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n", +				__func__, +				le16_to_cpu(rpipe->descr.wRPipeIndex)); +			list_del_init(&rpipe->list_node); +		} +	} +	spin_unlock_irqrestore(&wa->rpipe_lock, flags); +} + +/* add this RPIPE to the end of the delayed RPIPE list. */ +static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe) +{ +	unsigned long flags; + +	spin_lock_irqsave(&wa->rpipe_lock, flags); +	/* add rpipe to the list if it is not already on it. */ +	if (list_empty(&rpipe->list_node)) { +		pr_debug("%s: adding RPIPE %d to the delayed list.\n", +			__func__, le16_to_cpu(rpipe->descr.wRPipeIndex)); +		list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list); +	} +	spin_unlock_irqrestore(&wa->rpipe_lock, flags); +} + +/*   * xfer is referenced   *   * xfer->lock has to be unlocked @@ -211,6 +283,7 @@ static void wa_xfer_giveback(struct wa_xfer *xfer)  	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);  	list_del_init(&xfer->list_node); +	usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);  	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);  	/* FIXME: segmentation broken -- kills DWA */  	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); @@ -232,6 +305,31 @@ static void wa_xfer_completion(struct wa_xfer *xfer)  }  /* + * Initialize a transfer's ID + * + * We need to use a sequential number; if we use the pointer or the + * hash of the pointer, it can repeat over sequential transfers and + * then it will confuse the HWA....wonder why in hell they put a 32 + * bit handle in there then. + */ +static void wa_xfer_id_init(struct wa_xfer *xfer) +{ +	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); +} + +/* Return the xfer's ID. */ +static inline u32 wa_xfer_id(struct wa_xfer *xfer) +{ +	return xfer->id; +} + +/* Return the xfer's ID in transport format (little endian). 
*/ +static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer) +{ +	return cpu_to_le32(xfer->id); +} + +/*   * If transfer is done, wrap it up and return true   *   * xfer->lock has to be locked @@ -253,33 +351,37 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)  		switch (seg->status) {  		case WA_SEG_DONE:  			if (found_short && seg->result > 0) { -				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n", -					xfer, cnt, seg->result); +				dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n", +					xfer, wa_xfer_id(xfer), cnt, +					seg->result);  				urb->status = -EINVAL;  				goto out;  			}  			urb->actual_length += seg->result; -			if (seg->result < xfer->seg_size +			if (!(usb_pipeisoc(xfer->urb->pipe)) +				&& seg->result < xfer->seg_size  			    && cnt != xfer->segs-1)  				found_short = 1; -			dev_dbg(dev, "xfer %p#%u: DONE short %d " +			dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "  				"result %zu urb->actual_length %d\n", -				xfer, seg->index, found_short, seg->result, -				urb->actual_length); +				xfer, wa_xfer_id(xfer), seg->index, found_short, +				seg->result, urb->actual_length);  			break;  		case WA_SEG_ERROR:  			xfer->result = seg->result; -			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n", -				xfer, seg->index, seg->result); +			dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n", +				xfer, wa_xfer_id(xfer), seg->index, seg->result, +				seg->result);  			goto out;  		case WA_SEG_ABORTED: -			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n", -				xfer, seg->index, urb->status); -			xfer->result = urb->status; +			xfer->result = seg->result; +			dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n", +				xfer, wa_xfer_id(xfer), seg->index, seg->result, +				seg->result);  			goto out;  		default: -			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n", -				 xfer, cnt, seg->status); +			dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n", +				 xfer, wa_xfer_id(xfer), cnt, seg->status);  			xfer->result = -EINVAL;  			goto out;  		} @@ -290,26 +392,21 @@ out:  }  /* - * Initialize a transfer's ID + * Mark the given segment as done.  Return true if this completes the xfer. + * This should only be called for segs that have been submitted to an RPIPE. + * Delayed segs are not marked as submitted so they do not need to be marked + * as done when cleaning up.   * - * We need to use a sequential number; if we use the pointer or the - * hash of the pointer, it can repeat over sequential transfers and - * then it will confuse the HWA....wonder why in hell they put a 32 - * bit handle in there then. + * xfer->lock has to be locked   */ -static void wa_xfer_id_init(struct wa_xfer *xfer) +static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer, +	struct wa_seg *seg, enum wa_seg_status status)  { -	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); -} +	seg->status = status; +	xfer->segs_done++; -/* - * Return the xfer's ID associated with xfer - * - * Need to generate a - */ -static u32 wa_xfer_id(struct wa_xfer *xfer) -{ -	return xfer->id; +	/* check for done. */ +	return __wa_xfer_is_done(xfer);  }  /* @@ -339,12 +436,51 @@ out:  struct wa_xfer_abort_buffer {  	struct urb urb; +	struct wahc *wa;  	struct wa_xfer_abort cmd;  };  static void __wa_xfer_abort_cb(struct urb *urb)  {  	struct wa_xfer_abort_buffer *b = urb->context; +	struct wahc *wa = b->wa; + +	/* +	 * If the abort request URB failed, then the HWA did not get the abort +	 * command.  
Forcibly clean up the xfer without waiting for a Transfer +	 * Result from the HWA. +	 */ +	if (urb->status < 0) { +		struct wa_xfer *xfer; +		struct device *dev = &wa->usb_iface->dev; + +		xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID)); +		dev_err(dev, "%s: Transfer Abort request failed. result: %d\n", +			__func__, urb->status); +		if (xfer) { +			unsigned long flags; +			int done; +			struct wa_rpipe *rpipe = xfer->ep->hcpriv; + +			dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n", +				__func__, xfer, wa_xfer_id(xfer)); +			spin_lock_irqsave(&xfer->lock, flags); +			/* mark all segs as aborted. */ +			wa_complete_remaining_xfer_segs(xfer, 0, +				WA_SEG_ABORTED); +			done = __wa_xfer_is_done(xfer); +			spin_unlock_irqrestore(&xfer->lock, flags); +			if (done) +				wa_xfer_completion(xfer); +			wa_xfer_delayed_run(rpipe); +			wa_xfer_put(xfer); +		} else { +			dev_err(dev, "%s: xfer ID 0x%08X already gone.\n", +				 __func__, le32_to_cpu(b->cmd.dwTransferID)); +		} +	} + +	wa_put(wa);	/* taken in __wa_xfer_abort */  	usb_put_urb(&b->urb);  } @@ -356,15 +492,11 @@ static void __wa_xfer_abort_cb(struct urb *urb)   *   * The callback (see above) does nothing but freeing up the data by   * putting the URB. Because the URB is allocated at the head of the - * struct, the whole space we allocated is kfreed. - * - * We'll get an 'aborted transaction' xfer result on DTI, that'll - * politely ignore because at this point the transaction has been - * marked as aborted already. + * struct, the whole space we allocated is kfreed. *   */ -static void __wa_xfer_abort(struct wa_xfer *xfer) +static int __wa_xfer_abort(struct wa_xfer *xfer)  { -	int result; +	int result = -ENOMEM;  	struct device *dev = &xfer->wa->usb_iface->dev;  	struct wa_xfer_abort_buffer *b;  	struct wa_rpipe *rpipe = xfer->ep->hcpriv; @@ -375,7 +507,8 @@ static void __wa_xfer_abort(struct wa_xfer *xfer)  	b->cmd.bLength =  sizeof(b->cmd);  	b->cmd.bRequestType = WA_XFER_ABORT;  	b->cmd.wRPipe = rpipe->descr.wRPipeIndex; -	b->cmd.dwTransferID = wa_xfer_id(xfer); +	b->cmd.dwTransferID = wa_xfer_id_le32(xfer); +	b->wa = wa_get(xfer->wa);  	usb_init_urb(&b->urb);  	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev, @@ -385,20 +518,63 @@ static void __wa_xfer_abort(struct wa_xfer *xfer)  	result = usb_submit_urb(&b->urb, GFP_ATOMIC);  	if (result < 0)  		goto error_submit; -	return;				/* callback frees! */ +	return result;				/* callback frees! */  error_submit: +	wa_put(xfer->wa);  	if (printk_ratelimit())  		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",  			xfer, result);  	kfree(b);  error_kmalloc: -	return; +	return result;  }  /* + * Calculate the number of isoc frames starting from isoc_frame_offset + * that will fit a in transfer segment. + */ +static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer, +	int isoc_frame_offset, int *total_size) +{ +	int segment_size = 0, frame_count = 0; +	int index = isoc_frame_offset; +	struct usb_iso_packet_descriptor *iso_frame_desc = +		xfer->urb->iso_frame_desc; + +	while ((index < xfer->urb->number_of_packets) +		&& ((segment_size + iso_frame_desc[index].length) +				<= xfer->seg_size)) { +		/* +		 * For Alereon HWA devices, only include an isoc frame in an +		 * out segment if it is physically contiguous with the previous +		 * frame.  This is required because those devices expect +		 * the isoc frames to be sent as a single USB transaction as +		 * opposed to one transaction per frame with standard HWA. 
+		 */ +		if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) +			&& (xfer->is_inbound == 0) +			&& (index > isoc_frame_offset) +			&& ((iso_frame_desc[index - 1].offset + +				iso_frame_desc[index - 1].length) != +				iso_frame_desc[index].offset)) +			break; + +		/* this frame fits. count it. */ +		++frame_count; +		segment_size += iso_frame_desc[index].length; + +		/* move to the next isoc frame. */ +		++index; +	} + +	*total_size = segment_size; +	return frame_count; +} + +/*   *   * @returns < 0 on error, transfer segment request size if ok   */ @@ -422,43 +598,85 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,  		result = sizeof(struct wa_xfer_bi);  		break;  	case USB_ENDPOINT_XFER_ISOC: -		dev_err(dev, "FIXME: ISOC not implemented\n"); -		result = -ENOSYS; -		goto error; +		*pxfer_type = WA_XFER_TYPE_ISO; +		result = sizeof(struct wa_xfer_hwaiso); +		break;  	default:  		/* never happens */  		BUG();  		result = -EINVAL;	/* shut gcc up */ -	}; +	}  	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;  	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0; + +	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);  	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)  		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);  	/* Compute the segment size and make sure it is a multiple of  	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of  	 * a check (FIXME) */ -	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);  	if (xfer->seg_size < maxpktsize) { -		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize " -			"%zu\n", xfer->seg_size, maxpktsize); +		dev_err(dev, +			"HW BUG? seg_size %zu smaller than maxpktsize %zu\n", +			xfer->seg_size, maxpktsize);  		result = -EINVAL;  		goto error;  	}  	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize; -	xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size); -	if (xfer->segs >= WA_SEGS_MAX) { -		dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n", -			(int)(urb->transfer_buffer_length / xfer->seg_size), +	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) { +		int index = 0; + +		xfer->segs = 0; +		/* +		 * loop over urb->number_of_packets to determine how many +		 * xfer segments will be needed to send the isoc frames. +		 */ +		while (index < urb->number_of_packets) { +			int seg_size; /* don't care. */ +			index += __wa_seg_calculate_isoc_frame_count(xfer, +					index, &seg_size); +			++xfer->segs; +		} +	} else { +		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, +						xfer->seg_size); +		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) +			xfer->segs = 1; +	} + +	if (xfer->segs > WA_SEGS_MAX) { +		dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n", +			(urb->transfer_buffer_length/xfer->seg_size),  			WA_SEGS_MAX);  		result = -EINVAL;  		goto error;  	} -	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) -		xfer->segs = 1;  error:  	return result;  } +static void __wa_setup_isoc_packet_descr( +		struct wa_xfer_packet_info_hwaiso *packet_desc, +		struct wa_xfer *xfer, +		struct wa_seg *seg) { +	struct usb_iso_packet_descriptor *iso_frame_desc = +		xfer->urb->iso_frame_desc; +	int frame_index; + +	/* populate isoc packet descriptor. 
*/ +	packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO; +	packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) + +		(sizeof(packet_desc->PacketLength[0]) * +			seg->isoc_frame_count)); +	for (frame_index = 0; frame_index < seg->isoc_frame_count; +		++frame_index) { +		int offset_index = frame_index + seg->isoc_frame_offset; +		packet_desc->PacketLength[frame_index] = +			cpu_to_le16(iso_frame_desc[offset_index].length); +	} +} + +  /* Fill in the common request header and xfer-type specific data. */  static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,  				 struct wa_xfer_hdr *xfer_hdr0, @@ -466,12 +684,13 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,  				 size_t xfer_hdr_size)  {  	struct wa_rpipe *rpipe = xfer->ep->hcpriv; +	struct wa_seg *seg = xfer->seg[0]; -	xfer_hdr0 = &xfer->seg[0]->xfer_hdr; +	xfer_hdr0 = &seg->xfer_hdr;  	xfer_hdr0->bLength = xfer_hdr_size;  	xfer_hdr0->bRequestType = xfer_type;  	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex; -	xfer_hdr0->dwTransferID = wa_xfer_id(xfer); +	xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);  	xfer_hdr0->bTransferSegment = 0;  	switch (xfer_type) {  	case WA_XFER_TYPE_CTL: { @@ -484,8 +703,18 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,  	}  	case WA_XFER_TYPE_BI:  		break; -	case WA_XFER_TYPE_ISO: -		printk(KERN_ERR "FIXME: ISOC not implemented\n"); +	case WA_XFER_TYPE_ISO: { +		struct wa_xfer_hwaiso *xfer_iso = +			container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr); +		struct wa_xfer_packet_info_hwaiso *packet_desc = +			((void *)xfer_iso) + xfer_hdr_size; + +		/* populate the isoc section of the transfer request. */ +		xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count); +		/* populate isoc packet descriptor. */ +		__wa_setup_isoc_packet_descr(packet_desc, xfer, seg); +		break; +	}  	default:  		BUG();  	}; @@ -494,12 +723,12 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,  /*   * Callback for the OUT data phase of the segment request   * - * Check wa_seg_cb(); most comments also apply here because this + * Check wa_seg_tr_cb(); most comments also apply here because this   * function does almost the same thing and they work closely   * together.   *   * If the seg request has failed but this DTO phase has succeeded, - * wa_seg_cb() has already failed the segment and moved the + * wa_seg_tr_cb() has already failed the segment and moved the   * status to WA_SEG_ERROR, so this will go through 'case 0' and   * effectively do nothing.   */ @@ -512,6 +741,143 @@ static void wa_seg_dto_cb(struct urb *urb)  	struct wa_rpipe *rpipe;  	unsigned long flags;  	unsigned rpipe_ready = 0; +	int data_send_done = 1, release_dto = 0, holding_dto = 0; +	u8 done = 0; +	int result; + +	/* free the sg if it was used. */ +	kfree(urb->sg); +	urb->sg = NULL; + +	spin_lock_irqsave(&xfer->lock, flags); +	wa = xfer->wa; +	dev = &wa->usb_iface->dev; +	if (usb_pipeisoc(xfer->urb->pipe)) { +		/* Alereon HWA sends all isoc frames in a single transfer. */ +		if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) +			seg->isoc_frame_index += seg->isoc_frame_count; +		else +			seg->isoc_frame_index += 1; +		if (seg->isoc_frame_index < seg->isoc_frame_count) { +			data_send_done = 0; +			holding_dto = 1; /* checked in error cases. */ +			/* +			 * if this is the last isoc frame of the segment, we +			 * can release DTO after sending this frame. 
+			 */ +			if ((seg->isoc_frame_index + 1) >= +				seg->isoc_frame_count) +				release_dto = 1; +		} +		dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n", +			wa_xfer_id(xfer), seg->index, seg->isoc_frame_index, +			holding_dto, release_dto); +	} +	spin_unlock_irqrestore(&xfer->lock, flags); + +	switch (urb->status) { +	case 0: +		spin_lock_irqsave(&xfer->lock, flags); +		seg->result += urb->actual_length; +		if (data_send_done) { +			dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n", +				wa_xfer_id(xfer), seg->index, seg->result); +			if (seg->status < WA_SEG_PENDING) +				seg->status = WA_SEG_PENDING; +		} else { +			/* should only hit this for isoc xfers. */ +			/* +			 * Populate the dto URB with the next isoc frame buffer, +			 * send the URB and release DTO if we no longer need it. +			 */ +			 __wa_populate_dto_urb_isoc(xfer, seg, +				seg->isoc_frame_offset + seg->isoc_frame_index); + +			/* resubmit the URB with the next isoc frame. */ +			/* take a ref on resubmit. */ +			wa_xfer_get(xfer); +			result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); +			if (result < 0) { +				dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n", +				       wa_xfer_id(xfer), seg->index, result); +				spin_unlock_irqrestore(&xfer->lock, flags); +				goto error_dto_submit; +			} +		} +		spin_unlock_irqrestore(&xfer->lock, flags); +		if (release_dto) { +			__wa_dto_put(wa); +			wa_check_for_delayed_rpipes(wa); +		} +		break; +	case -ECONNRESET:	/* URB unlinked; no need to do anything */ +	case -ENOENT:		/* as it was done by the who unlinked us */ +		if (holding_dto) { +			__wa_dto_put(wa); +			wa_check_for_delayed_rpipes(wa); +		} +		break; +	default:		/* Other errors ... */ +		dev_err(dev, "xfer 0x%08X#%u: data out error %d\n", +			wa_xfer_id(xfer), seg->index, urb->status); +		goto error_default; +	} + +	/* taken when this URB was submitted. */ +	wa_xfer_put(xfer); +	return; + +error_dto_submit: +	/* taken on resubmit attempt. */ +	wa_xfer_put(xfer); +error_default: +	spin_lock_irqsave(&xfer->lock, flags); +	rpipe = xfer->ep->hcpriv; +	if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, +		    EDC_ERROR_TIMEFRAME)){ +		dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n"); +		wa_reset_all(wa); +	} +	if (seg->status != WA_SEG_ERROR) { +		seg->result = urb->status; +		__wa_xfer_abort(xfer); +		rpipe_ready = rpipe_avail_inc(rpipe); +		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR); +	} +	spin_unlock_irqrestore(&xfer->lock, flags); +	if (holding_dto) { +		__wa_dto_put(wa); +		wa_check_for_delayed_rpipes(wa); +	} +	if (done) +		wa_xfer_completion(xfer); +	if (rpipe_ready) +		wa_xfer_delayed_run(rpipe); +	/* taken when this URB was submitted. */ +	wa_xfer_put(xfer); +} + +/* + * Callback for the isoc packet descriptor phase of the segment request + * + * Check wa_seg_tr_cb(); most comments also apply here because this + * function does almost the same thing and they work closely + * together. + * + * If the seg request has failed but this phase has succeeded, + * wa_seg_tr_cb() has already failed the segment and moved the + * status to WA_SEG_ERROR, so this will go through 'case 0' and + * effectively do nothing. 
+ */ +static void wa_seg_iso_pack_desc_cb(struct urb *urb) +{ +	struct wa_seg *seg = urb->context; +	struct wa_xfer *xfer = seg->xfer; +	struct wahc *wa; +	struct device *dev; +	struct wa_rpipe *rpipe; +	unsigned long flags; +	unsigned rpipe_ready = 0;  	u8 done = 0;  	switch (urb->status) { @@ -519,11 +885,10 @@ static void wa_seg_dto_cb(struct urb *urb)  		spin_lock_irqsave(&xfer->lock, flags);  		wa = xfer->wa;  		dev = &wa->usb_iface->dev; -		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n", -			xfer, seg->index, urb->actual_length); -		if (seg->status < WA_SEG_PENDING) +		dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n", +			wa_xfer_id(xfer), seg->index); +		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)  			seg->status = WA_SEG_PENDING; -		seg->result = urb->actual_length;  		spin_unlock_irqrestore(&xfer->lock, flags);  		break;  	case -ECONNRESET:	/* URB unlinked; no need to do anything */ @@ -534,21 +899,20 @@ static void wa_seg_dto_cb(struct urb *urb)  		wa = xfer->wa;  		dev = &wa->usb_iface->dev;  		rpipe = xfer->ep->hcpriv; -		dev_dbg(dev, "xfer %p#%u: data out error %d\n", -			xfer, seg->index, urb->status); +		pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n", +				wa_xfer_id(xfer), seg->index, urb->status);  		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,  			    EDC_ERROR_TIMEFRAME)){ -			dev_err(dev, "DTO: URB max acceptable errors " -				"exceeded, resetting device\n"); +			dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");  			wa_reset_all(wa);  		}  		if (seg->status != WA_SEG_ERROR) { -			seg->status = WA_SEG_ERROR; +			usb_unlink_urb(seg->dto_urb);  			seg->result = urb->status; -			xfer->segs_done++;  			__wa_xfer_abort(xfer);  			rpipe_ready = rpipe_avail_inc(rpipe); -			done = __wa_xfer_is_done(xfer); +			done = __wa_xfer_mark_seg_as_done(xfer, seg, +					WA_SEG_ERROR);  		}  		spin_unlock_irqrestore(&xfer->lock, flags);  		if (done) @@ -556,6 +920,8 @@ static void wa_seg_dto_cb(struct urb *urb)  		if (rpipe_ready)  			wa_xfer_delayed_run(rpipe);  	} +	/* taken when this URB was submitted. */ +	wa_xfer_put(xfer);  }  /* @@ -572,11 +938,11 @@ static void wa_seg_dto_cb(struct urb *urb)   * We have to check before setting the status to WA_SEG_PENDING   * because sometimes the xfer result callback arrives before this   * callback (geeeeeeze), so it might happen that we are already in - * another state. As well, we don't set it if the transfer is inbound, + * another state. As well, we don't set it if the transfer is not inbound,   * as in that case, wa_seg_dto_cb will do it when the OUT data phase   * finishes.   
*/ -static void wa_seg_cb(struct urb *urb) +static void wa_seg_tr_cb(struct urb *urb)  {  	struct wa_seg *seg = urb->context;  	struct wa_xfer *xfer = seg->xfer; @@ -592,8 +958,11 @@ static void wa_seg_cb(struct urb *urb)  		spin_lock_irqsave(&xfer->lock, flags);  		wa = xfer->wa;  		dev = &wa->usb_iface->dev; -		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index); -		if (xfer->is_inbound && seg->status < WA_SEG_PENDING) +		dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n", +			xfer, wa_xfer_id(xfer), seg->index); +		if (xfer->is_inbound && +			seg->status < WA_SEG_PENDING && +			!(usb_pipeisoc(xfer->urb->pipe)))  			seg->status = WA_SEG_PENDING;  		spin_unlock_irqrestore(&xfer->lock, flags);  		break; @@ -606,35 +975,39 @@ static void wa_seg_cb(struct urb *urb)  		dev = &wa->usb_iface->dev;  		rpipe = xfer->ep->hcpriv;  		if (printk_ratelimit()) -			dev_err(dev, "xfer %p#%u: request error %d\n", -				xfer, seg->index, urb->status); +			dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n", +				xfer, wa_xfer_id(xfer), seg->index, +				urb->status);  		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,  			    EDC_ERROR_TIMEFRAME)){  			dev_err(dev, "DTO: URB max acceptable errors "  				"exceeded, resetting device\n");  			wa_reset_all(wa);  		} +		usb_unlink_urb(seg->isoc_pack_desc_urb);  		usb_unlink_urb(seg->dto_urb); -		seg->status = WA_SEG_ERROR;  		seg->result = urb->status; -		xfer->segs_done++;  		__wa_xfer_abort(xfer);  		rpipe_ready = rpipe_avail_inc(rpipe); -		done = __wa_xfer_is_done(xfer); +		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);  		spin_unlock_irqrestore(&xfer->lock, flags);  		if (done)  			wa_xfer_completion(xfer);  		if (rpipe_ready)  			wa_xfer_delayed_run(rpipe);  	} +	/* taken when this URB was submitted. */ +	wa_xfer_put(xfer);  } -/* allocate an SG list to store bytes_to_transfer bytes and copy the +/* + * Allocate an SG list to store bytes_to_transfer bytes and copy the   * subset of the in_sg that matches the buffer subset - * we are about to transfer. */ + * we are about to transfer. + */  static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,  	const unsigned int bytes_transferred, -	const unsigned int bytes_to_transfer, unsigned int *out_num_sgs) +	const unsigned int bytes_to_transfer, int *out_num_sgs)  {  	struct scatterlist *out_sg;  	unsigned int bytes_processed = 0, offset_into_current_page_data = 0, @@ -710,6 +1083,75 @@ static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,  }  /* + * Populate DMA buffer info for the isoc dto urb. + */ +static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer, +	struct wa_seg *seg, int curr_iso_frame) +{ +	seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; +	seg->dto_urb->sg = NULL; +	seg->dto_urb->num_sgs = 0; +	/* dto urb buffer address pulled from iso_frame_desc. */ +	seg->dto_urb->transfer_dma = xfer->urb->transfer_dma + +		xfer->urb->iso_frame_desc[curr_iso_frame].offset; +	/* The Alereon HWA sends a single URB with all isoc segs. */ +	if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) +		seg->dto_urb->transfer_buffer_length = seg->isoc_size; +	else +		seg->dto_urb->transfer_buffer_length = +			xfer->urb->iso_frame_desc[curr_iso_frame].length; +} + +/* + * Populate buffer ptr and size, DMA buffer or SG list for the dto urb. 
+ */ +static int __wa_populate_dto_urb(struct wa_xfer *xfer, +	struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size) +{ +	int result = 0; + +	if (xfer->is_dma) { +		seg->dto_urb->transfer_dma = +			xfer->urb->transfer_dma + buf_itr_offset; +		seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; +		seg->dto_urb->sg = NULL; +		seg->dto_urb->num_sgs = 0; +	} else { +		/* do buffer or SG processing. */ +		seg->dto_urb->transfer_flags &= +			~URB_NO_TRANSFER_DMA_MAP; +		/* this should always be 0 before a resubmit. */ +		seg->dto_urb->num_mapped_sgs = 0; + +		if (xfer->urb->transfer_buffer) { +			seg->dto_urb->transfer_buffer = +				xfer->urb->transfer_buffer + +				buf_itr_offset; +			seg->dto_urb->sg = NULL; +			seg->dto_urb->num_sgs = 0; +		} else { +			seg->dto_urb->transfer_buffer = NULL; + +			/* +			 * allocate an SG list to store seg_size bytes +			 * and copy the subset of the xfer->urb->sg that +			 * matches the buffer subset we are about to +			 * read. +			 */ +			seg->dto_urb->sg = wa_xfer_create_subset_sg( +				xfer->urb->sg, +				buf_itr_offset, buf_itr_size, +				&(seg->dto_urb->num_sgs)); +			if (!(seg->dto_urb->sg)) +				result = -ENOMEM; +		} +	} +	seg->dto_urb->transfer_buffer_length = buf_itr_size; + +	return result; +} + +/*   * Allocate the segs array and initialize each of them   *   * The segments are freed by wa_xfer_destroy() when the xfer use count @@ -719,7 +1161,7 @@ static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,   */  static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)  { -	int result, cnt; +	int result, cnt, isoc_frame_offset = 0;  	size_t alloc_size = sizeof(*xfer->seg[0])  		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;  	struct usb_device *usb_dev = xfer->wa->usb_dev; @@ -734,18 +1176,63 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)  	buf_itr = 0;  	buf_size = xfer->urb->transfer_buffer_length;  	for (cnt = 0; cnt < xfer->segs; cnt++) { -		seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC); +		size_t iso_pkt_descr_size = 0; +		int seg_isoc_frame_count = 0, seg_isoc_size = 0; + +		/* +		 * Adjust the size of the segment object to contain space for +		 * the isoc packet descriptor buffer. +		 */ +		if (usb_pipeisoc(xfer->urb->pipe)) { +			seg_isoc_frame_count = +				__wa_seg_calculate_isoc_frame_count(xfer, +					isoc_frame_offset, &seg_isoc_size); + +			iso_pkt_descr_size = +				sizeof(struct wa_xfer_packet_info_hwaiso) + +				(seg_isoc_frame_count * sizeof(__le16)); +		} +		seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size, +						GFP_ATOMIC);  		if (seg == NULL)  			goto error_seg_kmalloc;  		wa_seg_init(seg);  		seg->xfer = xfer;  		seg->index = cnt; -		usb_fill_bulk_urb(&seg->urb, usb_dev, +		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,  				  usb_sndbulkpipe(usb_dev,  						  dto_epd->bEndpointAddress),  				  &seg->xfer_hdr, xfer_hdr_size, -				  wa_seg_cb, seg); +				  wa_seg_tr_cb, seg);  		buf_itr_size = min(buf_size, xfer->seg_size); + +		if (usb_pipeisoc(xfer->urb->pipe)) { +			seg->isoc_frame_count = seg_isoc_frame_count; +			seg->isoc_frame_offset = isoc_frame_offset; +			seg->isoc_size = seg_isoc_size; +			/* iso packet descriptor. */ +			seg->isoc_pack_desc_urb = +					usb_alloc_urb(0, GFP_ATOMIC); +			if (seg->isoc_pack_desc_urb == NULL) +				goto error_iso_pack_desc_alloc; +			/* +			 * The buffer for the isoc packet descriptor starts +			 * after the transfer request header in the +			 * segment object memory buffer. 
+			 */ +			usb_fill_bulk_urb( +				seg->isoc_pack_desc_urb, usb_dev, +				usb_sndbulkpipe(usb_dev, +					dto_epd->bEndpointAddress), +				(void *)(&seg->xfer_hdr) + +					xfer_hdr_size, +				iso_pkt_descr_size, +				wa_seg_iso_pack_desc_cb, seg); + +			/* adjust starting frame offset for next seg. */ +			isoc_frame_offset += seg_isoc_frame_count; +		} +  		if (xfer->is_inbound == 0 && buf_size > 0) {  			/* outbound data. */  			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC); @@ -756,69 +1243,44 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)  				usb_sndbulkpipe(usb_dev,  						dto_epd->bEndpointAddress),  				NULL, 0, wa_seg_dto_cb, seg); -			if (xfer->is_dma) { -				seg->dto_urb->transfer_dma = -					xfer->urb->transfer_dma + buf_itr; -				seg->dto_urb->transfer_flags |= -					URB_NO_TRANSFER_DMA_MAP; -				seg->dto_urb->transfer_buffer = NULL; -				seg->dto_urb->sg = NULL; -				seg->dto_urb->num_sgs = 0; + +			if (usb_pipeisoc(xfer->urb->pipe)) { +				/* +				 * Fill in the xfer buffer information for the +				 * first isoc frame.  Subsequent frames in this +				 * segment will be filled in and sent from the +				 * DTO completion routine, if needed. +				 */ +				__wa_populate_dto_urb_isoc(xfer, seg, +					seg->isoc_frame_offset);  			} else { -				/* do buffer or SG processing. */ -				seg->dto_urb->transfer_flags &= -					~URB_NO_TRANSFER_DMA_MAP; -				/* this should always be 0 before a resubmit. */ -				seg->dto_urb->num_mapped_sgs = 0; - -				if (xfer->urb->transfer_buffer) { -					seg->dto_urb->transfer_buffer = -						xfer->urb->transfer_buffer + -						buf_itr; -					seg->dto_urb->sg = NULL; -					seg->dto_urb->num_sgs = 0; -				} else { -					/* allocate an SG list to store seg_size -					    bytes and copy the subset of the -					    xfer->urb->sg that matches the -					    buffer subset we are about to read. -					*/ -					seg->dto_urb->sg = -						wa_xfer_create_subset_sg( -						xfer->urb->sg, -						buf_itr, buf_itr_size, -						&(seg->dto_urb->num_sgs)); - -					if (!(seg->dto_urb->sg)) { -						seg->dto_urb->num_sgs	= 0; -						goto error_sg_alloc; -					} - -					seg->dto_urb->transfer_buffer = NULL; -				} +				/* fill in the xfer buffer information. */ +				result = __wa_populate_dto_urb(xfer, seg, +							buf_itr, buf_itr_size); +				if (result < 0) +					goto error_seg_outbound_populate; + +				buf_itr += buf_itr_size; +				buf_size -= buf_itr_size;  			} -			seg->dto_urb->transfer_buffer_length = buf_itr_size;  		}  		seg->status = WA_SEG_READY; -		buf_itr += buf_itr_size; -		buf_size -= buf_itr_size;  	}  	return 0; -error_sg_alloc: +	/* +	 * Free the memory for the current segment which failed to init. +	 * Use the fact that cnt is left at were it failed.  The remaining +	 * segments will be cleaned up by wa_xfer_destroy. 
+	 */ +error_seg_outbound_populate:  	usb_free_urb(xfer->seg[cnt]->dto_urb);  error_dto_alloc: +	usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb); +error_iso_pack_desc_alloc:  	kfree(xfer->seg[cnt]); -	cnt--; +	xfer->seg[cnt] = NULL;  error_seg_kmalloc: -	/* use the fact that cnt is left at were it failed */ -	for (; cnt >= 0; cnt--) { -		if (xfer->seg[cnt] && xfer->is_inbound == 0) { -			usb_free_urb(xfer->seg[cnt]->dto_urb); -			kfree(xfer->seg[cnt]->dto_urb->sg); -		} -		kfree(xfer->seg[cnt]); -	}  error_segs_kzalloc:  	return result;  } @@ -856,21 +1318,50 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)  	wa_xfer_id_init(xfer);  	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); -	/* Fill remainig headers */ +	/* Fill remaining headers */  	xfer_hdr = xfer_hdr0; -	transfer_size = urb->transfer_buffer_length; -	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? -		xfer->seg_size : transfer_size; -	transfer_size -=  xfer->seg_size; -	for (cnt = 1; cnt < xfer->segs; cnt++) { -		xfer_hdr = &xfer->seg[cnt]->xfer_hdr; -		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); -		xfer_hdr->bTransferSegment = cnt; -		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ? -			cpu_to_le32(xfer->seg_size) -			: cpu_to_le32(transfer_size); -		xfer->seg[cnt]->status = WA_SEG_READY; +	if (xfer_type == WA_XFER_TYPE_ISO) { +		xfer_hdr0->dwTransferLength = +			cpu_to_le32(xfer->seg[0]->isoc_size); +		for (cnt = 1; cnt < xfer->segs; cnt++) { +			struct wa_xfer_packet_info_hwaiso *packet_desc; +			struct wa_seg *seg = xfer->seg[cnt]; +			struct wa_xfer_hwaiso *xfer_iso; + +			xfer_hdr = &seg->xfer_hdr; +			xfer_iso = container_of(xfer_hdr, +						struct wa_xfer_hwaiso, hdr); +			packet_desc = ((void *)xfer_hdr) + xfer_hdr_size; +			/* +			 * Copy values from the 0th header. Segment specific +			 * values are set below. +			 */ +			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); +			xfer_hdr->bTransferSegment = cnt; +			xfer_hdr->dwTransferLength = +				cpu_to_le32(seg->isoc_size); +			xfer_iso->dwNumOfPackets = +					cpu_to_le32(seg->isoc_frame_count); +			__wa_setup_isoc_packet_descr(packet_desc, xfer, seg); +			seg->status = WA_SEG_READY; +		} +	} else { +		transfer_size = urb->transfer_buffer_length; +		xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? +			cpu_to_le32(xfer->seg_size) : +			cpu_to_le32(transfer_size);  		transfer_size -=  xfer->seg_size; +		for (cnt = 1; cnt < xfer->segs; cnt++) { +			xfer_hdr = &xfer->seg[cnt]->xfer_hdr; +			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); +			xfer_hdr->bTransferSegment = cnt; +			xfer_hdr->dwTransferLength = +				transfer_size > xfer->seg_size ? +					cpu_to_le32(xfer->seg_size) +					: cpu_to_le32(transfer_size); +			xfer->seg[cnt]->status = WA_SEG_READY; +			transfer_size -=  xfer->seg_size; +		}  	}  	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */  	result = 0; @@ -885,70 +1376,161 @@ error_setup_sizes:   * rpipe->seg_lock is held!   */  static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer, -			   struct wa_seg *seg) +			   struct wa_seg *seg, int *dto_done)  {  	int result; -	result = usb_submit_urb(&seg->urb, GFP_ATOMIC); + +	/* default to done unless we encounter a multi-frame isoc segment. */ +	*dto_done = 1; + +	/* +	 * Take a ref for each segment urb so the xfer cannot disappear until +	 * all of the callbacks run. +	 */ +	wa_xfer_get(xfer); +	/* submit the transfer request. 
*/ +	seg->status = WA_SEG_SUBMITTED; +	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);  	if (result < 0) { -		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n", -		       xfer, seg->index, result); -		goto error_seg_submit; +		pr_err("%s: xfer %p#%u: REQ submit failed: %d\n", +		       __func__, xfer, seg->index, result); +		wa_xfer_put(xfer); +		goto error_tr_submit;  	} +	/* submit the isoc packet descriptor if present. */ +	if (seg->isoc_pack_desc_urb) { +		wa_xfer_get(xfer); +		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC); +		seg->isoc_frame_index = 0; +		if (result < 0) { +			pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n", +			       __func__, xfer, seg->index, result); +			wa_xfer_put(xfer); +			goto error_iso_pack_desc_submit; +		} +	} +	/* submit the out data if this is an out request. */  	if (seg->dto_urb) { +		struct wahc *wa = xfer->wa; +		wa_xfer_get(xfer);  		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);  		if (result < 0) { -			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n", -			       xfer, seg->index, result); +			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n", +			       __func__, xfer, seg->index, result); +			wa_xfer_put(xfer);  			goto error_dto_submit;  		} +		/* +		 * If this segment contains more than one isoc frame, hold +		 * onto the dto resource until we send all frames. +		 * Only applies to non-Alereon devices. +		 */ +		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0) +			&& (seg->isoc_frame_count > 1)) +			*dto_done = 0;  	} -	seg->status = WA_SEG_SUBMITTED;  	rpipe_avail_dec(rpipe);  	return 0;  error_dto_submit: -	usb_unlink_urb(&seg->urb); -error_seg_submit: +	usb_unlink_urb(seg->isoc_pack_desc_urb); +error_iso_pack_desc_submit: +	usb_unlink_urb(&seg->tr_urb); +error_tr_submit:  	seg->status = WA_SEG_ERROR;  	seg->result = result; +	*dto_done = 1;  	return result;  }  /* - * Execute more queued request segments until the maximum concurrent allowed + * Execute more queued request segments until the maximum concurrent allowed. + * Return true if the DTO resource was acquired and released.   *   * The ugly unlock/lock sequence on the error path is needed as the   * xfer->lock normally nests the seg_lock and not viceversa. - *   */ -static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) +static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)  { -	int result; +	int result, dto_acquired = 0, dto_done = 0;  	struct device *dev = &rpipe->wa->usb_iface->dev;  	struct wa_seg *seg;  	struct wa_xfer *xfer;  	unsigned long flags; +	*dto_waiting = 0; +  	spin_lock_irqsave(&rpipe->seg_lock, flags);  	while (atomic_read(&rpipe->segs_available) > 0 -	      && !list_empty(&rpipe->seg_list)) { +	      && !list_empty(&rpipe->seg_list) +	      && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {  		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,  				 list_node);  		list_del(&seg->list_node);  		xfer = seg->xfer; -		result = __wa_seg_submit(rpipe, xfer, seg); -		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n", -			xfer, seg->index, atomic_read(&rpipe->segs_available), result); +		/* +		 * Get a reference to the xfer in case the callbacks for the +		 * URBs submitted by __wa_seg_submit attempt to complete +		 * the xfer before this function completes. +		 */ +		wa_xfer_get(xfer); +		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done); +		/* release the dto resource if this RPIPE is done with it. 
*/ +		if (dto_done) +			__wa_dto_put(rpipe->wa); +		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n", +			xfer, wa_xfer_id(xfer), seg->index, +			atomic_read(&rpipe->segs_available), result);  		if (unlikely(result < 0)) { +			int done; +  			spin_unlock_irqrestore(&rpipe->seg_lock, flags);  			spin_lock_irqsave(&xfer->lock, flags);  			__wa_xfer_abort(xfer); +			/* +			 * This seg was marked as submitted when it was put on +			 * the RPIPE seg_list.  Mark it done. +			 */  			xfer->segs_done++; +			done = __wa_xfer_is_done(xfer);  			spin_unlock_irqrestore(&xfer->lock, flags); +			if (done) +				wa_xfer_completion(xfer);  			spin_lock_irqsave(&rpipe->seg_lock, flags);  		} +		wa_xfer_put(xfer);  	} +	/* +	 * Mark this RPIPE as waiting if dto was not acquired, there are +	 * delayed segs and no active transfers to wake us up later. +	 */ +	if (!dto_acquired && !list_empty(&rpipe->seg_list) +		&& (atomic_read(&rpipe->segs_available) == +			le16_to_cpu(rpipe->descr.wRequests))) +		*dto_waiting = 1; +  	spin_unlock_irqrestore(&rpipe->seg_lock, flags); + +	return dto_done; +} + +static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) +{ +	int dto_waiting; +	int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting); + +	/* +	 * If this RPIPE is waiting on the DTO resource, add it to the tail of +	 * the waiting list. +	 * Otherwise, if the WA DTO resource was acquired and released by +	 *  __wa_xfer_delayed_run, another RPIPE may have attempted to acquire +	 * DTO and failed during that time.  Check the delayed list and process +	 * any waiters.  Start searching from the next RPIPE index. +	 */ +	if (dto_waiting) +		wa_add_delayed_rpipe(rpipe->wa, rpipe); +	else if (dto_done) +		wa_check_for_delayed_rpipes(rpipe->wa);  }  /* @@ -960,7 +1542,7 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)   */  static int __wa_xfer_submit(struct wa_xfer *xfer)  { -	int result; +	int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;  	struct wahc *wa = xfer->wa;  	struct device *dev = &wa->usb_iface->dev;  	unsigned cnt; @@ -979,27 +1561,58 @@ static int __wa_xfer_submit(struct wa_xfer *xfer)  	result = 0;  	spin_lock_irqsave(&rpipe->seg_lock, flags);  	for (cnt = 0; cnt < xfer->segs; cnt++) { +		int delay_seg = 1; +  		available = atomic_read(&rpipe->segs_available);  		empty = list_empty(&rpipe->seg_list);  		seg = xfer->seg[cnt]; -		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n", -			xfer, cnt, available, empty, -			available == 0 || !empty ? "delayed" : "submitted"); -		if (available == 0 || !empty) { -			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt); +		if (available && empty) { +			/* +			 * Only attempt to acquire DTO if we have a segment +			 * to send. 
+			 */ +			dto_acquired = __wa_dto_try_get(rpipe->wa); +			if (dto_acquired) { +				delay_seg = 0; +				result = __wa_seg_submit(rpipe, xfer, seg, +							&dto_done); +				dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n", +					xfer, wa_xfer_id(xfer), cnt, available, +					empty); +				if (dto_done) +					__wa_dto_put(rpipe->wa); + +				if (result < 0) { +					__wa_xfer_abort(xfer); +					goto error_seg_submit; +				} +			} +		} + +		if (delay_seg) { +			dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n", +				xfer, wa_xfer_id(xfer), cnt, available,  empty);  			seg->status = WA_SEG_DELAYED;  			list_add_tail(&seg->list_node, &rpipe->seg_list); -		} else { -			result = __wa_seg_submit(rpipe, xfer, seg); -			if (result < 0) { -				__wa_xfer_abort(xfer); -				goto error_seg_submit; -			}  		}  		xfer->segs_submitted++;  	}  error_seg_submit: +	/* +	 * Mark this RPIPE as waiting if dto was not acquired, there are +	 * delayed segs and no active transfers to wake us up later. +	 */ +	if (!dto_acquired && !list_empty(&rpipe->seg_list) +		&& (atomic_read(&rpipe->segs_available) == +			le16_to_cpu(rpipe->descr.wRequests))) +		dto_waiting = 1;  	spin_unlock_irqrestore(&rpipe->seg_lock, flags); + +	if (dto_waiting) +		wa_add_delayed_rpipe(rpipe->wa, rpipe); +	else if (dto_done) +		wa_check_for_delayed_rpipes(rpipe->wa); +  	return result;  } @@ -1025,7 +1638,7 @@ error_seg_submit:   * result never kicks in, the xfer will timeout from the USB code and   * dequeue() will be called.   */ -static void wa_urb_enqueue_b(struct wa_xfer *xfer) +static int wa_urb_enqueue_b(struct wa_xfer *xfer)  {  	int result;  	unsigned long flags; @@ -1036,18 +1649,23 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)  	unsigned done;  	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); -	if (result < 0) +	if (result < 0) { +		pr_err("%s: error_rpipe_get\n", __func__);  		goto error_rpipe_get; +	}  	result = -ENODEV;  	/* FIXME: segmentation broken -- kills DWA */  	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */  	if (urb->dev == NULL) {  		mutex_unlock(&wusbhc->mutex); +		pr_err("%s: error usb dev gone\n", __func__);  		goto error_dev_gone;  	}  	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);  	if (wusb_dev == NULL) {  		mutex_unlock(&wusbhc->mutex); +		dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n", +			__func__);  		goto error_dev_gone;  	}  	mutex_unlock(&wusbhc->mutex); @@ -1055,21 +1673,35 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)  	spin_lock_irqsave(&xfer->lock, flags);  	xfer->wusb_dev = wusb_dev;  	result = urb->status; -	if (urb->status != -EINPROGRESS) +	if (urb->status != -EINPROGRESS) { +		dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);  		goto error_dequeued; +	}  	result = __wa_xfer_setup(xfer, urb); -	if (result < 0) +	if (result < 0) { +		dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);  		goto error_xfer_setup; +	} +	/* +	 * Get a xfer reference since __wa_xfer_submit starts asynchronous +	 * operations that may try to complete the xfer before this function +	 * exits. 
+	 */ +	wa_xfer_get(xfer);  	result = __wa_xfer_submit(xfer); -	if (result < 0) +	if (result < 0) { +		dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);  		goto error_xfer_submit; +	}  	spin_unlock_irqrestore(&xfer->lock, flags); -	return; +	wa_xfer_put(xfer); +	return 0; -	/* this is basically wa_xfer_completion() broken up wa_xfer_giveback() -	 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean -	 * upundo setup(). +	/* +	 * this is basically wa_xfer_completion() broken up wa_xfer_giveback() +	 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo +	 * setup().  	 */  error_xfer_setup:  error_dequeued: @@ -1081,8 +1713,7 @@ error_dev_gone:  	rpipe_put(xfer->ep->hcpriv);  error_rpipe_get:  	xfer->result = result; -	wa_xfer_giveback(xfer); -	return; +	return result;  error_xfer_submit:  	done = __wa_xfer_is_done(xfer); @@ -1090,6 +1721,9 @@ error_xfer_submit:  	spin_unlock_irqrestore(&xfer->lock, flags);  	if (done)  		wa_xfer_completion(xfer); +	wa_xfer_put(xfer); +	/* return success since the completion routine will run. */ +	return 0;  }  /* @@ -1123,7 +1757,8 @@ void wa_urb_enqueue_run(struct work_struct *ws)  		list_del_init(&xfer->list_node);  		urb = xfer->urb; -		wa_urb_enqueue_b(xfer); +		if (wa_urb_enqueue_b(xfer) < 0) +			wa_xfer_giveback(xfer);  		usb_put_urb(urb);	/* taken when queuing */  	}  } @@ -1201,6 +1836,12 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,  		dump_stack();  	} +	spin_lock_irqsave(&wa->xfer_list_lock, my_flags); +	result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb); +	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); +	if (result < 0) +		goto error_link_urb; +  	result = -ENOMEM;  	xfer = kzalloc(sizeof(*xfer), gfp);  	if (xfer == NULL) @@ -1229,13 +1870,32 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,  		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);  		queue_work(wusbd, &wa->xfer_enqueue_work);  	} else { -		wa_urb_enqueue_b(xfer); +		result = wa_urb_enqueue_b(xfer); +		if (result < 0) { +			/* +			 * URB submit/enqueue failed.  Clean up, return an +			 * error and do not run the callback.  This avoids +			 * an infinite submit/complete loop. +			 */ +			dev_err(dev, "%s: URB enqueue failed: %d\n", +			   __func__, result); +			wa_put(xfer->wa); +			wa_xfer_put(xfer); +			spin_lock_irqsave(&wa->xfer_list_lock, my_flags); +			usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); +			spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); +			return result; +		}  	}  	return 0;  error_dequeued:  	kfree(xfer);  error_kmalloc: +	spin_lock_irqsave(&wa->xfer_list_lock, my_flags); +	usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); +	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); +error_link_urb:  	return result;  }  EXPORT_SYMBOL_GPL(wa_urb_enqueue); @@ -1258,31 +1918,51 @@ EXPORT_SYMBOL_GPL(wa_urb_enqueue);   * asynch request] and then make sure we cancel each segment.   *   */ -int wa_urb_dequeue(struct wahc *wa, struct urb *urb) +int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)  {  	unsigned long flags, flags2;  	struct wa_xfer *xfer;  	struct wa_seg *seg;  	struct wa_rpipe *rpipe; -	unsigned cnt; +	unsigned cnt, done = 0, xfer_abort_pending;  	unsigned rpipe_ready = 0; +	int result; -	xfer = urb->hcpriv; -	if (xfer == NULL) { +	/* check if it is safe to unlink. 
*/ +	spin_lock_irqsave(&wa->xfer_list_lock, flags); +	result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status); +	if ((result == 0) && urb->hcpriv) {  		/* -		 * Nothing setup yet enqueue will see urb->status != -		 * -EINPROGRESS (by hcd layer) and bail out with -		 * error, no need to do completion +		 * Get a xfer ref to prevent a race with wa_xfer_giveback +		 * cleaning up the xfer while we are working with it.  		 */ -		BUG_ON(urb->status == -EINPROGRESS); -		goto out; +		wa_xfer_get(urb->hcpriv);  	} +	spin_unlock_irqrestore(&wa->xfer_list_lock, flags); +	if (result) +		return result; + +	xfer = urb->hcpriv; +	if (xfer == NULL) +		return -ENOENT;  	spin_lock_irqsave(&xfer->lock, flags); +	pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));  	rpipe = xfer->ep->hcpriv;  	if (rpipe == NULL) { -		pr_debug("%s: xfer id 0x%08X has no RPIPE.  %s", -			__func__, wa_xfer_id(xfer), +		pr_debug("%s: xfer %p id 0x%08X has no RPIPE.  %s", +			__func__, xfer, wa_xfer_id(xfer),  			"Probably already aborted.\n" ); +		result = -ENOENT; +		goto out_unlock; +	} +	/* +	 * Check for done to avoid racing with wa_xfer_giveback and completing +	 * twice. +	 */ +	if (__wa_xfer_is_done(xfer)) { +		pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__, +			xfer, wa_xfer_id(xfer)); +		result = -ENOENT;  		goto out_unlock;  	}  	/* Check the delayed list -> if there, release and complete */ @@ -1293,9 +1973,16 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)  	if (xfer->seg == NULL)  	/* still hasn't reached */  		goto out_unlock;	/* setup(), enqueue_b() completes */  	/* Ok, the xfer is in flight already, it's been setup and submitted.*/ -	__wa_xfer_abort(xfer); +	xfer_abort_pending = __wa_xfer_abort(xfer) >= 0; +	/* +	 * grab the rpipe->seg_lock here to prevent racing with +	 * __wa_xfer_delayed_run. +	 */ +	spin_lock(&rpipe->seg_lock);  	for (cnt = 0; cnt < xfer->segs; cnt++) {  		seg = xfer->seg[cnt]; +		pr_debug("%s: xfer id 0x%08X#%d status = %d\n", +			__func__, wa_xfer_id(xfer), cnt, seg->status);  		switch (seg->status) {  		case WA_SEG_NOTREADY:  		case WA_SEG_READY: @@ -1304,50 +1991,68 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)  			WARN_ON(1);  			break;  		case WA_SEG_DELAYED: +			/* +			 * delete from rpipe delayed list.  If no segments on +			 * this xfer have been submitted, __wa_xfer_is_done will +			 * trigger a giveback below.  Otherwise, the submitted +			 * segments will be completed in the DTI interrupt. +			 */  			seg->status = WA_SEG_ABORTED; -			spin_lock_irqsave(&rpipe->seg_lock, flags2); +			seg->result = -ENOENT;  			list_del(&seg->list_node);  			xfer->segs_done++; -			rpipe_ready = rpipe_avail_inc(rpipe); -			spin_unlock_irqrestore(&rpipe->seg_lock, flags2); -			break; -		case WA_SEG_SUBMITTED: -			seg->status = WA_SEG_ABORTED; -			usb_unlink_urb(&seg->urb); -			if (xfer->is_inbound == 0) -				usb_unlink_urb(seg->dto_urb); -			xfer->segs_done++; -			rpipe_ready = rpipe_avail_inc(rpipe); -			break; -		case WA_SEG_PENDING: -			seg->status = WA_SEG_ABORTED; -			xfer->segs_done++; -			rpipe_ready = rpipe_avail_inc(rpipe); -			break; -		case WA_SEG_DTI_PENDING: -			usb_unlink_urb(wa->dti_urb); -			seg->status = WA_SEG_ABORTED; -			xfer->segs_done++; -			rpipe_ready = rpipe_avail_inc(rpipe);  			break;  		case WA_SEG_DONE:  		case WA_SEG_ERROR:  		case WA_SEG_ABORTED:  			break; +			/* +			 * The buf_in data for a segment in the +			 * WA_SEG_DTI_PENDING state is actively being read. 
+			 * Let wa_buf_in_cb handle it since it will be called +			 * and will increment xfer->segs_done.  Cleaning up +			 * here could cause wa_buf_in_cb to access the xfer +			 * after it has been completed/freed. +			 */ +		case WA_SEG_DTI_PENDING: +			break; +			/* +			 * In the states below, the HWA device already knows +			 * about the transfer.  If an abort request was sent, +			 * allow the HWA to process it and wait for the +			 * results.  Otherwise, the DTI state and seg completed +			 * counts can get out of sync. +			 */ +		case WA_SEG_SUBMITTED: +		case WA_SEG_PENDING: +			/* +			 * Check if the abort was successfully sent.  This could +			 * be false if the HWA has been removed but we haven't +			 * gotten the disconnect notification yet. +			 */ +			if (!xfer_abort_pending) { +				seg->status = WA_SEG_ABORTED; +				rpipe_ready = rpipe_avail_inc(rpipe); +				xfer->segs_done++; +			} +			break;  		}  	} +	spin_unlock(&rpipe->seg_lock);  	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */ -	__wa_xfer_is_done(xfer); +	done = __wa_xfer_is_done(xfer);  	spin_unlock_irqrestore(&xfer->lock, flags); -	wa_xfer_completion(xfer); +	if (done) +		wa_xfer_completion(xfer);  	if (rpipe_ready)  		wa_xfer_delayed_run(rpipe); -	return 0; +	wa_xfer_put(xfer); +	return result;  out_unlock:  	spin_unlock_irqrestore(&xfer->lock, flags); -out: -	return 0; +	wa_xfer_put(xfer); +	return result;  dequeue_delayed:  	list_del_init(&xfer->list_node); @@ -1355,6 +2060,7 @@ dequeue_delayed:  	xfer->result = urb->status;  	spin_unlock_irqrestore(&xfer->lock, flags);  	wa_xfer_giveback(xfer); +	wa_xfer_put(xfer);  	usb_put_urb(urb);		/* we got a ref in enqueue() */  	return 0;  } @@ -1383,7 +2089,7 @@ static int wa_xfer_status_to_errno(u8 status)  		[WA_XFER_STATUS_NOT_FOUND] =		0,  		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,  		[WA_XFER_STATUS_TRANSACTION_ERROR] = 	-EILSEQ, -		[WA_XFER_STATUS_ABORTED] = 		-EINTR, +		[WA_XFER_STATUS_ABORTED] =		-ENOENT,  		[WA_XFER_STATUS_RPIPE_NOT_READY] = 	EINVAL,  		[WA_XFER_INVALID_FORMAT] = 		EINVAL,  		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = 	EINVAL, @@ -1410,24 +2116,178 @@ static int wa_xfer_status_to_errno(u8 status)  }  /* + * If a last segment flag and/or a transfer result error is encountered, + * no other segment transfer results will be returned from the device. + * Mark the remaining submitted or pending xfers as completed so that + * the xfer will complete cleanly. + * + * xfer->lock must be held + * + */ +static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, +		int starting_index, enum wa_seg_status status) +{ +	int index; +	struct wa_rpipe *rpipe = xfer->ep->hcpriv; + +	for (index = starting_index; index < xfer->segs_submitted; index++) { +		struct wa_seg *current_seg = xfer->seg[index]; + +		BUG_ON(current_seg == NULL); + +		switch (current_seg->status) { +		case WA_SEG_SUBMITTED: +		case WA_SEG_PENDING: +		case WA_SEG_DTI_PENDING: +			rpipe_avail_inc(rpipe); +		/* +		 * do not increment RPIPE avail for the WA_SEG_DELAYED case +		 * since it has not been submitted to the RPIPE. +		 */ +		case WA_SEG_DELAYED: +			xfer->segs_done++; +			current_seg->status = status; +			break; +		case WA_SEG_ABORTED: +			break; +		default: +			WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n", +				__func__, wa_xfer_id(xfer), index, +				current_seg->status); +			break; +		} +	} +} + +/* Populate the given urb based on the current isoc transfer state. 
+/* Populate the given urb based on the current isoc transfer state. */
+static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
+	struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
+{
+	int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
+	int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
+	struct usb_iso_packet_descriptor *iso_frame_desc =
+						xfer->urb->iso_frame_desc;
+	const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
+	int next_frame_contiguous;
+	struct usb_iso_packet_descriptor *iso_frame;
+
+	BUG_ON(buf_in_urb->status == -EINPROGRESS);
+
+	/*
+	 * If the current frame actual_length is contiguous with the next frame
+	 * and actual_length is a multiple of the DTI endpoint max packet size,
+	 * combine the current frame with the next frame in a single URB.  This
+	 * reduces the number of URBs that must be submitted in that case.
+	 */
+	seg_index = seg->isoc_frame_index;
+	do {
+		next_frame_contiguous = 0;
+
+		iso_frame = &iso_frame_desc[urb_frame_index];
+		total_len += iso_frame->actual_length;
+		++urb_frame_index;
+		++seg_index;
+
+		if (seg_index < seg->isoc_frame_count) {
+			struct usb_iso_packet_descriptor *next_iso_frame;
+
+			next_iso_frame = &iso_frame_desc[urb_frame_index];
+
+			if ((iso_frame->offset + iso_frame->actual_length) ==
+				next_iso_frame->offset)
+				next_frame_contiguous = 1;
+		}
+	} while (next_frame_contiguous
+			&& ((iso_frame->actual_length % dti_packet_size) == 0));
+
+	/* this should always be 0 before a resubmit. */
+	buf_in_urb->num_mapped_sgs	= 0;
+	buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
+		iso_frame_desc[urb_start_frame].offset;
+	buf_in_urb->transfer_buffer_length = total_len;
+	buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+	buf_in_urb->transfer_buffer = NULL;
+	buf_in_urb->sg = NULL;
+	buf_in_urb->num_sgs = 0;
+	buf_in_urb->context = seg;
+
+	/* return the number of frames included in this URB. */
+	return seg_index - seg->isoc_frame_index;
+}
+
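__wa_populate_buf_in_urb_isoc() above coalesces consecutive isoc frames into a single read URB for as long as each frame's data sits contiguously in the buffer and fills whole DTI packets. The stand-alone sketch below mirrors just that loop under stated assumptions: struct frame and frames_per_urb() are invented names, and the real code walks usb_iso_packet_descriptor entries instead.

#include <stdio.h>

struct frame {
	unsigned int offset;		/* offset into the transfer buffer */
	unsigned int actual_length;	/* bytes the device reported */
};

/* Return how many frames starting at 'first' one read URB can cover:
 * keep extending while the current frame is a multiple of the DTI max
 * packet size and the next frame's data follows contiguously. */
static int frames_per_urb(const struct frame *f, int nframes, int first,
			  unsigned int dti_mps, unsigned int *total_len)
{
	int i = first;

	*total_len = 0;
	do {
		*total_len += f[i].actual_length;
		i++;
	} while (i < nframes &&
		 f[i - 1].offset + f[i - 1].actual_length == f[i].offset &&
		 (f[i - 1].actual_length % dti_mps) == 0);

	return i - first;
}

int main(void)
{
	/* three contiguous full 512-byte frames, then a short frame */
	struct frame f[] = {
		{ 0, 512 }, { 512, 512 }, { 1024, 512 }, { 1536, 100 },
	};
	unsigned int len;
	int n = frames_per_urb(f, 4, 0, 512, &len);

	printf("%d frames, %u bytes in one URB\n", n, len);	/* 4, 1636 */
	return 0;
}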
+/* Populate the given urb based on the current transfer state. */
+static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
+	unsigned int seg_idx, unsigned int bytes_transferred)
+{
+	int result = 0;
+	struct wa_seg *seg = xfer->seg[seg_idx];
+
+	BUG_ON(buf_in_urb->status == -EINPROGRESS);
+	/* this should always be 0 before a resubmit. */
+	buf_in_urb->num_mapped_sgs	= 0;
+
+	if (xfer->is_dma) {
+		buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+			+ (seg_idx * xfer->seg_size);
+		buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+		buf_in_urb->transfer_buffer = NULL;
+		buf_in_urb->sg = NULL;
+		buf_in_urb->num_sgs = 0;
+	} else {
+		/* do buffer or SG processing. */
+		buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
+
+		if (xfer->urb->transfer_buffer) {
+			buf_in_urb->transfer_buffer =
+				xfer->urb->transfer_buffer
+				+ (seg_idx * xfer->seg_size);
+			buf_in_urb->sg = NULL;
+			buf_in_urb->num_sgs = 0;
+		} else {
+			/* allocate an SG list to store seg_size bytes
+				and copy the subset of the xfer->urb->sg
+				that matches the buffer subset we are
+				about to read. */
+			buf_in_urb->sg = wa_xfer_create_subset_sg(
+				xfer->urb->sg,
+				seg_idx * xfer->seg_size,
+				bytes_transferred,
+				&(buf_in_urb->num_sgs));
+
+			if (!(buf_in_urb->sg)) {
+				buf_in_urb->num_sgs	= 0;
+				result = -ENOMEM;
+			}
+			buf_in_urb->transfer_buffer = NULL;
+		}
+	}
+	buf_in_urb->transfer_buffer_length = bytes_transferred;
+	buf_in_urb->context = seg;
+
+	return result;
+}
+
+/*
  * Process a xfer result completion message
  *
- * inbound transfers: need to schedule a DTI read
+ * inbound transfers: need to schedule a buf_in_urb read
  *
  * FIXME: this function needs to be broken up in parts
  */
-static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
+static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
+		struct wa_xfer_result *xfer_result)
 {
 	int result;
 	struct device *dev = &wa->usb_iface->dev;
 	unsigned long flags;
-	u8 seg_idx;
+	unsigned int seg_idx;
 	struct wa_seg *seg;
 	struct wa_rpipe *rpipe;
-	struct wa_xfer_result *xfer_result = wa->xfer_result;
-	u8 done = 0;
+	unsigned done = 0;
 	u8 usb_status;
 	unsigned rpipe_ready = 0;
+	unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
+	struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);

 	spin_lock_irqsave(&xfer->lock, flags);
 	seg_idx = xfer_result->bTransferSegment & 0x7f;
@@ -1436,8 +2296,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
 	seg = xfer->seg[seg_idx];
 	rpipe = xfer->ep->hcpriv;
 	usb_status = xfer_result->bTransferStatus;
-	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
-		xfer, seg_idx, usb_status, seg->status);
+	dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
+		xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
 	if (seg->status == WA_SEG_ABORTED
 	    || seg->status == WA_SEG_ERROR)	/* already handled */
 		goto segment_aborted;
@@ -1451,71 +2311,48 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
 	}
 	if (usb_status & 0x80) {
 		seg->result = wa_xfer_status_to_errno(usb_status);
-		dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
+		dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
 			xfer, xfer->id, seg->index, usb_status);
+		seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
+			WA_SEG_ABORTED : WA_SEG_ERROR;
 		goto error_complete;
 	}
 	/* FIXME: we ignore warnings, tally them for stats */
 	if (usb_status & 0x40) 		/* Warning?... */
 		usb_status = 0;		/* ... pass */
-	if (xfer->is_inbound) {	/* IN data phase: read to buffer */
+	/*
+	 * If the last segment bit is set, complete the remaining segments.
+	 * When the current segment is completed, either in wa_buf_in_cb for
+	 * transfers with data or below for no data, the xfer will complete.
+	 */
+	if (xfer_result->bTransferSegment & 0x80)
+		wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
+			WA_SEG_DONE);
+	if (usb_pipeisoc(xfer->urb->pipe)
+		&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
+		/* set up WA state to read the isoc packet status next. */
+		wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
+		wa->dti_isoc_xfer_seg = seg_idx;
+		wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
+	} else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
+			&& (bytes_transferred > 0)) {
+		/* IN data phase: read to buffer */
 		seg->status = WA_SEG_DTI_PENDING;
-		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
-		/* this should always be 0 before a resubmit. */
-		wa->buf_in_urb->num_mapped_sgs	= 0;
-
-		if (xfer->is_dma) {
-			wa->buf_in_urb->transfer_dma =
-				xfer->urb->transfer_dma
-				+ (seg_idx * xfer->seg_size);
-			wa->buf_in_urb->transfer_flags
-				|= URB_NO_TRANSFER_DMA_MAP;
-			wa->buf_in_urb->transfer_buffer = NULL;
-			wa->buf_in_urb->sg = NULL;
-			wa->buf_in_urb->num_sgs = 0;
-		} else {
-			/* do buffer or SG processing. */
-			wa->buf_in_urb->transfer_flags
-				&= ~URB_NO_TRANSFER_DMA_MAP;
-
-			if (xfer->urb->transfer_buffer) {
-				wa->buf_in_urb->transfer_buffer =
-					xfer->urb->transfer_buffer
-					+ (seg_idx * xfer->seg_size);
-				wa->buf_in_urb->sg = NULL;
-				wa->buf_in_urb->num_sgs = 0;
-			} else {
-				/* allocate an SG list to store seg_size bytes
-					and copy the subset of the xfer->urb->sg
-					that matches the buffer subset we are
-					about to read. */
-				wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
-					xfer->urb->sg,
-					seg_idx * xfer->seg_size,
-					le32_to_cpu(
-						xfer_result->dwTransferLength),
-					&(wa->buf_in_urb->num_sgs));
-
-				if (!(wa->buf_in_urb->sg)) {
-					wa->buf_in_urb->num_sgs	= 0;
-					goto error_sg_alloc;
-				}
-				wa->buf_in_urb->transfer_buffer = NULL;
-			}
-		}
-		wa->buf_in_urb->transfer_buffer_length =
-			le32_to_cpu(xfer_result->dwTransferLength);
-		wa->buf_in_urb->context = seg;
-		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+		result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
+			bytes_transferred);
 		if (result < 0)
+			goto error_buf_in_populate;
+		++(wa->active_buf_in_urbs);
+		result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+		if (result < 0) {
+			--(wa->active_buf_in_urbs);
 			goto error_submit_buf_in;
+		}
 	} else {
-		/* OUT data phase, complete it -- */
-		seg->status = WA_SEG_DONE;
-		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
-		xfer->segs_done++;
+		/* OUT data phase or no data, complete it -- */
+		seg->result = bytes_transferred;
 		rpipe_ready = rpipe_avail_inc(rpipe);
-		done = __wa_xfer_is_done(xfer);
+		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
 	}
 	spin_unlock_irqrestore(&xfer->lock, flags);
 	if (done)
@@ -1534,13 +2371,15 @@ error_submit_buf_in:
 		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
 			xfer, seg_idx, result);
 	seg->result = result;
-	kfree(wa->buf_in_urb->sg);
-error_sg_alloc:
+	kfree(buf_in_urb->sg);
+	buf_in_urb->sg = NULL;
+error_buf_in_populate:
 	__wa_xfer_abort(xfer);
-error_complete:
 	seg->status = WA_SEG_ERROR;
+error_complete:
 	xfer->segs_done++;
 	rpipe_ready = rpipe_avail_inc(rpipe);
+	wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
 	done = __wa_xfer_is_done(xfer);
 	/*
 	 * queue work item to clear STALL for control endpoints.
@@ -1551,12 +2390,10 @@ error_complete:
 		done) {
 		dev_info(dev, "Control EP stall.  Queue delayed work.\n");
-		spin_lock_irq(&wa->xfer_list_lock);
-		/* remove xfer from xfer_list. */
-		list_del(&xfer->list_node);
-		/* add xfer to xfer_errored_list. */
-		list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
-		spin_unlock_irq(&wa->xfer_list_lock);
+		spin_lock(&wa->xfer_list_lock);
+		/* move xfer from xfer_list to xfer_errored_list. */
+		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
+		spin_unlock(&wa->xfer_list_lock);
 		spin_unlock_irqrestore(&xfer->lock, flags);
 		queue_work(wusbd, &wa->xfer_error_work);
 	} else {
@@ -1571,7 +2408,7 @@ error_complete:

 error_bad_seg:
 	spin_unlock_irqrestore(&xfer->lock, flags);
-	wa_urb_dequeue(wa, xfer->urb);
+	wa_urb_dequeue(wa, xfer->urb, -ENOENT);
 	if (printk_ratelimit())
 		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
 	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
@@ -1587,6 +2424,154 @@ segment_aborted:
 }

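wa_xfer_result_chew() above reads bTransferStatus as three fields: bit 7 flags a halting error, bit 6 a warning that is deliberately passed through as success, and the low bits carry the [WUSB] status code. A tiny decoder in that spirit is sketched below; the names (classify_status, xfer_status_class) are invented, and the exact bit layout is taken from the checks in the code above.

#include <stdio.h>

enum xfer_status_class { XFER_OK, XFER_WARNING, XFER_ERROR };

/* Classify a raw status byte the way the completion path above does. */
static enum xfer_status_class classify_status(unsigned char status)
{
	if (status & 0x80)
		return XFER_ERROR;	/* halts the transfer */
	if (status & 0x40)
		return XFER_WARNING;	/* tallied, then treated as success */
	return XFER_OK;
}

int main(void)
{
	unsigned char status = 0x82;	/* error bit set, status code 2 */

	printf("class=%d code=0x%02x\n",
		classify_status(status), status & 0x3f);
	return 0;
}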
 /*
+ * Process a isochronous packet status message
+ *
+ * inbound transfers: need to schedule a buf_in_urb read
+ */
+static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
+{
+	struct device *dev = &wa->usb_iface->dev;
+	struct wa_xfer_packet_status_hwaiso *packet_status;
+	struct wa_xfer_packet_status_len_hwaiso *status_array;
+	struct wa_xfer *xfer;
+	unsigned long flags;
+	struct wa_seg *seg;
+	struct wa_rpipe *rpipe;
+	unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
+	unsigned first_frame_index = 0, rpipe_ready = 0;
+	int expected_size;
+
+	/* We have a xfer result buffer; check it */
+	dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
+		urb->actual_length, urb->transfer_buffer);
+	packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
+	if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
+		dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
+			packet_status->bPacketType);
+		goto error_parse_buffer;
+	}
+	xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
+	if (xfer == NULL) {
+		dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
+			wa->dti_isoc_xfer_in_progress);
+		goto error_parse_buffer;
+	}
+	spin_lock_irqsave(&xfer->lock, flags);
+	if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
+		goto error_bad_seg;
+	seg = xfer->seg[wa->dti_isoc_xfer_seg];
+	rpipe = xfer->ep->hcpriv;
+	expected_size = sizeof(*packet_status) +
+			(sizeof(packet_status->PacketStatus[0]) *
+			seg->isoc_frame_count);
+	if (urb->actual_length != expected_size) {
+		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
+			urb->actual_length, expected_size);
+		goto error_bad_seg;
+	}
+	if (le16_to_cpu(packet_status->wLength) != expected_size) {
+		dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
+			le16_to_cpu(packet_status->wLength));
+		goto error_bad_seg;
+	}
+	/* write isoc packet status and lengths back to the xfer urb. */
+	status_array = packet_status->PacketStatus;
+	xfer->urb->start_frame =
+		wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
+	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
+		struct usb_iso_packet_descriptor *iso_frame_desc =
+			xfer->urb->iso_frame_desc;
+		const int xfer_frame_index =
+			seg->isoc_frame_offset + seg_index;
+
+		iso_frame_desc[xfer_frame_index].status =
+			wa_xfer_status_to_errno(
+			le16_to_cpu(status_array[seg_index].PacketStatus));
+		iso_frame_desc[xfer_frame_index].actual_length =
+			le16_to_cpu(status_array[seg_index].PacketLength);
+		/* track the number of frames successfully transferred. */
+		if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
+			/* save the starting frame index for buf_in_urb. */
+			if (!data_frame_count)
+				first_frame_index = seg_index;
+			++data_frame_count;
+		}
+	}
+
+	if (xfer->is_inbound && data_frame_count) {
+		int result, total_frames_read = 0, urb_index = 0;
+		struct urb *buf_in_urb;
+
+		/* IN data phase: read to buffer */
+		seg->status = WA_SEG_DTI_PENDING;
+
+		/* start with the first frame with data. */
+		seg->isoc_frame_index = first_frame_index;
+		/* submit up to WA_MAX_BUF_IN_URBS read URBs. */
+		do {
+			int urb_frame_index, urb_frame_count;
+			struct usb_iso_packet_descriptor *iso_frame_desc;
+
+			buf_in_urb = &(wa->buf_in_urbs[urb_index]);
+			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
+				buf_in_urb, xfer, seg);
+			/* advance frame index to start of next read URB. */
+			seg->isoc_frame_index += urb_frame_count;
+			total_frames_read += urb_frame_count;
+
+			++(wa->active_buf_in_urbs);
+			result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+
+			/* skip 0-byte frames. */
+			urb_frame_index =
+				seg->isoc_frame_offset + seg->isoc_frame_index;
+			iso_frame_desc =
+				&(xfer->urb->iso_frame_desc[urb_frame_index]);
+			while ((seg->isoc_frame_index <
+						seg->isoc_frame_count) &&
+				 (iso_frame_desc->actual_length == 0)) {
+				++(seg->isoc_frame_index);
+				++iso_frame_desc;
+			}
+			++urb_index;
+
+		} while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
+				&& (seg->isoc_frame_index <
+						seg->isoc_frame_count));
+
+		if (result < 0) {
+			--(wa->active_buf_in_urbs);
+			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
+				result);
+			wa_reset_all(wa);
+		} else if (data_frame_count > total_frames_read)
+			/* If we need to read more frames, set DTI busy. */
+			dti_busy = 1;
+	} else {
+		/* OUT transfer or no more IN data, complete it -- */
+		rpipe_ready = rpipe_avail_inc(rpipe);
+		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
+	}
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	if (dti_busy)
+		wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
+	else
+		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+	if (done)
+		wa_xfer_completion(xfer);
+	if (rpipe_ready)
+		wa_xfer_delayed_run(rpipe);
+	wa_xfer_put(xfer);
+	return dti_busy;
+
+error_bad_seg:
+	spin_unlock_irqrestore(&xfer->lock, flags);
+	wa_xfer_put(xfer);
+error_parse_buffer:
+	return dti_busy;
+}
+
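Note how wa_process_iso_packet_status() above validates the message twice, against both the URB's actual_length and the message's own wLength field, computing the expected size as a fixed header plus one status element per isoc frame. The stand-alone sketch below shows that check with a hypothetical layout (pkt_status_hdr) rather than the driver's real wa_xfer_packet_status_hwaiso structure.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical message: a small header followed by one 16-bit status
 * entry per isoc frame, mirroring the shape the code above expects. */
struct pkt_status_hdr {
	uint16_t wLength;
	uint8_t  bPacketType;
	uint8_t  reserved;
	uint16_t PacketStatus[];	/* one entry per frame */
};

/* Accept the message only if both reported sizes match the frame count. */
static int msg_size_ok(size_t actual_len, uint16_t wLength, int frame_count)
{
	size_t expected = sizeof(struct pkt_status_hdr) +
		sizeof(uint16_t) * (size_t)frame_count;

	return actual_len == expected && wLength == expected;
}

int main(void)
{
	/* 4-byte header + 8 frames * 2 bytes = 20 bytes */
	printf("%d\n", msg_size_ok(20, 20, 8));	/* 1: accepted */
	printf("%d\n", msg_size_ok(18, 20, 8));	/* 0: short read rejected */
	return 0;
}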
+/*
  * Callback for the IN data phase
  *
  * If successful transition state; otherwise, take a note of the
@@ -1603,27 +2588,86 @@ static void wa_buf_in_cb(struct urb *urb)
 	struct wahc *wa;
 	struct device *dev;
 	struct wa_rpipe *rpipe;
-	unsigned rpipe_ready;
+	unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
 	unsigned long flags;
+	int resubmit_dti = 0, active_buf_in_urbs;
 	u8 done = 0;

 	/* free the sg if it was used. */
 	kfree(urb->sg);
 	urb->sg = NULL;

+	spin_lock_irqsave(&xfer->lock, flags);
+	wa = xfer->wa;
+	dev = &wa->usb_iface->dev;
+	--(wa->active_buf_in_urbs);
+	active_buf_in_urbs = wa->active_buf_in_urbs;
+
+	if (usb_pipeisoc(xfer->urb->pipe)) {
+		struct usb_iso_packet_descriptor *iso_frame_desc =
+			xfer->urb->iso_frame_desc;
+		int	seg_index;
+
+		/*
+		 * Find the next isoc frame with data and count how many
+		 * frames with data remain.
+		 */
+		seg_index = seg->isoc_frame_index;
+		while (seg_index < seg->isoc_frame_count) {
+			const int urb_frame_index =
+				seg->isoc_frame_offset + seg_index;
+
+			if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+				/* save the index of the next frame with data */
+				if (!isoc_data_frame_count)
+					seg->isoc_frame_index = seg_index;
+				++isoc_data_frame_count;
+			}
+			++seg_index;
+		}
+	}
+	spin_unlock_irqrestore(&xfer->lock, flags);
+
 	switch (urb->status) {
 	case 0:
 		spin_lock_irqsave(&xfer->lock, flags);
-		wa = xfer->wa;
-		dev = &wa->usb_iface->dev;
-		rpipe = xfer->ep->hcpriv;
-		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
-			xfer, seg->index, (size_t)urb->actual_length);
-		seg->status = WA_SEG_DONE;
-		seg->result = urb->actual_length;
-		xfer->segs_done++;
-		rpipe_ready = rpipe_avail_inc(rpipe);
-		done = __wa_xfer_is_done(xfer);
+
+		seg->result += urb->actual_length;
+		if (isoc_data_frame_count > 0) {
+			int result, urb_frame_count;
+
+			/* submit a read URB for the next frame with data. */
+			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
+				 xfer, seg);
+			/* advance index to start of next read URB. */
+			seg->isoc_frame_index += urb_frame_count;
+			++(wa->active_buf_in_urbs);
+			result = usb_submit_urb(urb, GFP_ATOMIC);
+			if (result < 0) {
+				--(wa->active_buf_in_urbs);
+				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
+					result);
+				wa_reset_all(wa);
+			}
+			/*
+			 * If we are in this callback and
+			 * isoc_data_frame_count > 0, it means that the dti_urb
+			 * submission was delayed in wa_dti_cb.  Once
+			 * we submit the last buf_in_urb, we can submit the
+			 * delayed dti_urb.
+			 */
+			  resubmit_dti = (isoc_data_frame_count ==
+							urb_frame_count);
+		} else if (active_buf_in_urbs == 0) {
+			rpipe = xfer->ep->hcpriv;
+			dev_dbg(dev,
+				"xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
+				xfer, wa_xfer_id(xfer), seg->index,
+				seg->result);
+			rpipe_ready = rpipe_avail_inc(rpipe);
+			done = __wa_xfer_mark_seg_as_done(xfer, seg,
+					WA_SEG_DONE);
+		}
 		spin_unlock_irqrestore(&xfer->lock, flags);
 		if (done)
 			wa_xfer_completion(xfer);
@@ -1634,31 +2678,50 @@ static void wa_buf_in_cb(struct urb *urb)
 	case -ENOENT:		/* as it was done by the who unlinked us */
 		break;
 	default:		/* Other errors ... */
+		/*
+		 * Error on data buf read.  Only resubmit DTI if it hasn't
+		 * already been done by previously hitting this error or by a
+		 * successful completion of the previous buf_in_urb.
+		 */
+		resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
 		spin_lock_irqsave(&xfer->lock, flags);
-		wa = xfer->wa;
-		dev = &wa->usb_iface->dev;
 		rpipe = xfer->ep->hcpriv;
 		if (printk_ratelimit())
-			dev_err(dev, "xfer %p#%u: data in error %d\n",
-				xfer, seg->index, urb->status);
+			dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
+				xfer, wa_xfer_id(xfer), seg->index,
+				urb->status);
 		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 			    EDC_ERROR_TIMEFRAME)){
 			dev_err(dev, "DTO: URB max acceptable errors "
 				"exceeded, resetting device\n");
 			wa_reset_all(wa);
 		}
-		seg->status = WA_SEG_ERROR;
 		seg->result = urb->status;
-		xfer->segs_done++;
 		rpipe_ready = rpipe_avail_inc(rpipe);
-		__wa_xfer_abort(xfer);
-		done = __wa_xfer_is_done(xfer);
+		if (active_buf_in_urbs == 0)
+			done = __wa_xfer_mark_seg_as_done(xfer, seg,
+				WA_SEG_ERROR);
+		else
+			__wa_xfer_abort(xfer);
 		spin_unlock_irqrestore(&xfer->lock, flags);
 		if (done)
 			wa_xfer_completion(xfer);
 		if (rpipe_ready)
 			wa_xfer_delayed_run(rpipe);
 	}
+
+	if (resubmit_dti) {
+		int result;
+
+		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+
+		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+		if (result < 0) {
+			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+				result);
+			wa_reset_all(wa);
+		}
+	}
 }

 /*
@@ -1687,56 +2750,65 @@
 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
-static void wa_xfer_result_cb(struct urb *urb)
+static void wa_dti_cb(struct urb *urb)
 {
-	int result;
+	int result, dti_busy = 0;
 	struct wahc *wa = urb->context;
 	struct device *dev = &wa->usb_iface->dev;
-	struct wa_xfer_result *xfer_result;
 	u32 xfer_id;
-	struct wa_xfer *xfer;
 	u8 usb_status;

 	BUG_ON(wa->dti_urb != urb);
 	switch (wa->dti_urb->status) {
 	case 0:
-		/* We have a xfer result buffer; check it */
-		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
-			urb->actual_length, urb->transfer_buffer);
-		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
-			dev_err(dev, "DTI Error: xfer result--bad size "
-				"xfer result (%d bytes vs %zu needed)\n",
-				urb->actual_length, sizeof(*xfer_result));
-			break;
-		}
-		xfer_result = wa->xfer_result;
-		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
-			dev_err(dev, "DTI Error: xfer result--"
-				"bad header length %u\n",
-				xfer_result->hdr.bLength);
-			break;
-		}
-		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
-			dev_err(dev, "DTI Error: xfer result--"
-				"bad header type 0x%02x\n",
-				xfer_result->hdr.bNotifyType);
-			break;
-		}
-		usb_status = xfer_result->bTransferStatus & 0x3f;
-		if (usb_status == WA_XFER_STATUS_NOT_FOUND)
-			/* taken care of already */
-			break;
-		xfer_id = xfer_result->dwTransferID;
-		xfer = wa_xfer_get_by_id(wa, xfer_id);
-		if (xfer == NULL) {
-			/* FIXME: transaction might have been cancelled */
-			dev_err(dev, "DTI Error: xfer result--"
-				"unknown xfer 0x%08x (status 0x%02x)\n",
-				xfer_id, usb_status);
-			break;
+		if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
+			struct wa_xfer_result *xfer_result;
+			struct wa_xfer *xfer;
+
+			/* We have a xfer result buffer; check it */
+			dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
+				urb->actual_length, urb->transfer_buffer);
+			if (urb->actual_length != sizeof(*xfer_result)) {
+				dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
+					urb->actual_length,
+					sizeof(*xfer_result));
+				break;
+			}
+			xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
+			if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
+				dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
+					xfer_result->hdr.bLength);
+				break;
+			}
+			if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
+				dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
+					xfer_result->hdr.bNotifyType);
+				break;
+			}
+			xfer_id = le32_to_cpu(xfer_result->dwTransferID);
+			usb_status = xfer_result->bTransferStatus & 0x3f;
+			if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
+				/* taken care of already */
+				dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
+					__func__, xfer_id,
+					xfer_result->bTransferSegment & 0x7f);
+				break;
+			}
+			xfer = wa_xfer_get_by_id(wa, xfer_id);
+			if (xfer == NULL) {
+				/* FIXME: transaction not found. */
+				dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
+					xfer_id, usb_status);
+				break;
+			}
+			wa_xfer_result_chew(wa, xfer, xfer_result);
+			wa_xfer_put(xfer);
+		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
+			dti_busy = wa_process_iso_packet_status(wa, urb);
+		} else {
+			dev_err(dev, "DTI Error: unexpected EP state = %d\n",
+				wa->dti_state);
 		}
-		wa_xfer_result_chew(wa, xfer);
-		wa_xfer_put(xfer);
 		break;
 	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
 	case -ESHUTDOWN:	/* going away! */
@@ -1755,18 +2827,69 @@
 			dev_err(dev, "DTI: URB error %d\n", urb->status);
 		break;
 	}
-	/* Resubmit the DTI URB */
-	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
-	if (result < 0) {
-		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
-			"resetting\n", result);
-		wa_reset_all(wa);
+
+	/* Resubmit the DTI URB if we are not busy processing isoc in frames. */
+	if (!dti_busy) {
+		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+		if (result < 0) {
+			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+				result);
+			wa_reset_all(wa);
+		}
 	}
 out:
 	return;
 }

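wa_dti_cb() above is a small state machine: the same DTI endpoint completion is parsed either as a transfer result or as isoc packet status depending on wa->dti_state, and the read is only resubmitted when no isoc buf_in reads are still in flight. The toy model below captures that shape only; it is a sketch with invented names (dti_complete, process_isoc_status), not the driver's control flow verbatim.

#include <stdio.h>

enum dti_state {
	DTI_TRANSFER_RESULT_PENDING,
	DTI_ISOC_PACKET_STATUS_PENDING,
	DTI_BUF_IN_DATA_PENDING,
};

static int process_isoc_status(void)
{
	return 1;	/* pretend buf_in reads were started: DTI stays busy */
}

/* Dispatch one completion of the shared endpoint based on state. */
static void dti_complete(enum dti_state *state)
{
	int busy = 0;

	switch (*state) {
	case DTI_TRANSFER_RESULT_PENDING:
		printf("parse a transfer result\n");
		break;
	case DTI_ISOC_PACKET_STATUS_PENDING:
		busy = process_isoc_status();
		*state = busy ? DTI_BUF_IN_DATA_PENDING
			      : DTI_TRANSFER_RESULT_PENDING;
		break;
	default:	/* data reads own the endpoint; nothing to parse */
		printf("unexpected state %d\n", *state);
		break;
	}
	if (!busy)
		printf("resubmit the DTI read\n");	/* usb_submit_urb() */
}

int main(void)
{
	enum dti_state state = DTI_ISOC_PACKET_STATUS_PENDING;

	dti_complete(&state);	/* goes busy; in the driver, the last    */
	return 0;		/* buf_in completion resubmits the read   */
}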
 /*
+ * Initialize the DTI URB for reading transfer result notifications and also
+ * the buffer-in URB, for reading buffers. Then we just submit the DTI URB.
+ */
+int wa_dti_start(struct wahc *wa)
+{
+	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
+	struct device *dev = &wa->usb_iface->dev;
+	int result = -ENOMEM, index;
+
+	if (wa->dti_urb != NULL)	/* DTI URB already started */
+		goto out;
+
+	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (wa->dti_urb == NULL) {
+		dev_err(dev, "Can't allocate DTI URB\n");
+		goto error_dti_urb_alloc;
+	}
+	usb_fill_bulk_urb(
+		wa->dti_urb, wa->usb_dev,
+		usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
+		wa->dti_buf, wa->dti_buf_size,
+		wa_dti_cb, wa);
+
+	/* init the buf in URBs */
+	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
+		usb_fill_bulk_urb(
+			&(wa->buf_in_urbs[index]), wa->usb_dev,
+			usb_rcvbulkpipe(wa->usb_dev,
+				0x80 | dti_epd->bEndpointAddress),
+			NULL, 0, wa_buf_in_cb, wa);
+	}
+	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
+	if (result < 0) {
+		dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
+			result);
+		goto error_dti_urb_submit;
+	}
+out:
+	return 0;
+
+error_dti_urb_submit:
+	usb_put_urb(wa->dti_urb);
+	wa->dti_urb = NULL;
+error_dti_urb_alloc:
+	return result;
+}
+EXPORT_SYMBOL_GPL(wa_dti_start);
+/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
@@ -1777,18 +2900,13 @@ out:
 * don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
- * Follow up in wa_xfer_result_cb(), as that's where the whole state
+ * Follow up in wa_dti_cb(), as that's where the whole state
 * machine starts.
 *
- * So here we just initialize the DTI URB for reading transfer result
- * notifications and also the buffer-in URB, for reading buffers. Then
- * we just submit the DTI URB.
- *
 * @wa shall be referenced
 */
 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
 {
-	int result;
 	struct device *dev = &wa->usb_iface->dev;
 	struct wa_notif_xfer *notif_xfer;
 	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
@@ -1802,44 +2920,13 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
 			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
 		goto error;
 	}
-	if (wa->dti_urb != NULL)	/* DTI URB already started */
-		goto out;
-	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (wa->dti_urb == NULL) {
-		dev_err(dev, "Can't allocate DTI URB\n");
-		goto error_dti_urb_alloc;
-	}
-	usb_fill_bulk_urb(
-		wa->dti_urb, wa->usb_dev,
-		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
-		wa->xfer_result, wa->xfer_result_size,
-		wa_xfer_result_cb, wa);
+	/* attempt to start the DTI ep processing. */
+	if (wa_dti_start(wa) < 0)
+		goto error;

-	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (wa->buf_in_urb == NULL) {
-		dev_err(dev, "Can't allocate BUF-IN URB\n");
-		goto error_buf_in_urb_alloc;
-	}
-	usb_fill_bulk_urb(
-		wa->buf_in_urb, wa->usb_dev,
-		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
-		NULL, 0, wa_buf_in_cb, wa);
-	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
-	if (result < 0) {
-		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
-			"resetting\n", result);
-		goto error_dti_urb_submit;
-	}
-out:
 	return;

-error_dti_urb_submit:
-	usb_put_urb(wa->buf_in_urb);
-error_buf_in_urb_alloc:
-	usb_put_urb(wa->dti_urb);
-	wa->dti_urb = NULL;
-error_dti_urb_alloc:
 error:
 	wa_reset_all(wa);
 }
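wa_dti_start() above is written so it can be called safely on every transfer notification: the wa->dti_urb != NULL guard makes second and later calls cheap no-ops that still return success. A minimal model of that lazy, idempotent start, with invented names (struct dti, dti_start):

#include <stdio.h>
#include <stdlib.h>

struct dti {
	void *urb;	/* NULL until started, like wa->dti_urb */
};

/* Start once; every later call finds the URB in place and succeeds. */
static int dti_start(struct dti *d)
{
	if (d->urb != NULL)	/* already started */
		return 0;

	d->urb = malloc(1);	/* stands in for usb_alloc_urb()/submit */
	if (d->urb == NULL)
		return -1;
	printf("DTI started\n");
	return 0;
}

int main(void)
{
	struct dti d = { NULL };

	dti_start(&d);	/* starts the machinery */
	dti_start(&d);	/* no-op, still reports success */
	free(d.urb);
	return 0;
}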
