Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--  net/sctp/ulpqueue.c | 150
1 file changed, 100 insertions, 50 deletions
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index c7f7e49609c..d49dc2ed30a 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -21,38 +21,32 @@
  * See the GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING.  If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Jon Grimm             <jgrimm@us.ibm.com>
  *    La Monte H.P. Yarroll <piggy@acm.org>
  *    Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
+#include <net/busy_poll.h>
 #include <net/sctp/structs.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers.  */
-static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 					      struct sctp_ulpevent *);
-static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
+static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
 					      struct sctp_ulpevent *);
 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
 
@@ -68,7 +62,6 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
 	skb_queue_head_init(&ulpq->reasm);
 	skb_queue_head_init(&ulpq->lobby);
 	ulpq->pd_mode  = 0;
-	ulpq->malloced = 0;
 
 	return ulpq;
 }
@@ -96,8 +89,6 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 void sctp_ulpq_free(struct sctp_ulpq *ulpq)
 {
 	sctp_ulpq_flush(ulpq);
-	if (ulpq->malloced)
-		kfree(ulpq);
 }
 
 /* Process an incoming DATA chunk.  */
@@ -105,10 +96,8 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 			gfp_t gfp)
 {
 	struct sk_buff_head temp;
-	sctp_data_chunk_t *hdr;
 	struct sctp_ulpevent *event;
-
-	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
+	int event_eor = 0;
 
 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -119,7 +108,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	event = sctp_ulpq_reasm(ulpq, event);
 
 	/* Do ordering if needed.  */
-	if ((event) && (event->msg_flags & MSG_EOR)){
+	if ((event) && (event->msg_flags & MSG_EOR)) {
 		/* Create a temporary list to collect chunks on.  */
 		skb_queue_head_init(&temp);
 		__skb_queue_tail(&temp, sctp_event2skb(event));
@@ -130,10 +119,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 
-	return 0;
+	return event_eor;
 }
 
 /* Add a new event for propagation to the ULP.  */
@@ -214,6 +205,9 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
 		goto out_free;
 
+	if (!sctp_ulpevent_is_notification(event))
+		sk_mark_napi_id(sk, skb);
+
 	/* Check if the user wishes to receive this event.  */
 	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
 		goto out_free;
@@ -243,7 +237,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		} else {
 			/*
 			 * If fragment interleave is enabled, we
-			 * can queue this to the recieve queue instead
+			 * can queue this to the receive queue instead
 			 * of the lobby.
 			 */
 			if (sctp_sk(sk)->frag_interleave)
@@ -269,7 +263,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		sctp_ulpq_clear_pd(ulpq);
 
 	if (queue == &sk->sk_receive_queue)
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 	return 1;
 
 out_free:
@@ -329,7 +323,9 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+	struct sk_buff_head *queue, struct sk_buff *f_frag,
+	struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
 	struct sk_buff *new = NULL;
@@ -344,7 +340,8 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
 		pos = f_frag->next;
 
 	/* Get the last skb in the f_frag's frag_list if present. */
-	for (last = list; list; last = list, list = list->next);
+	for (last = list; list; last = list, list = list->next)
+		;
 
 	/* Add the list of remaining fragments to the first fragments
 	 * frag_list.
@@ -397,7 +394,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
 	}
 
 	event = sctp_skb2event(f_frag);
-	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
+	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
 
 	return event;
 }
@@ -496,7 +493,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
 		cevent = sctp_skb2event(pd_first);
 		pd_point = sctp_sk(asoc->base.sk)->pd_point;
 		if (pd_point && pd_point <= pd_len) {
-			retval = sctp_make_reassembled_event(&ulpq->reasm,
+			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+							     &ulpq->reasm,
 							     pd_first,
 							     pd_last);
 			if (retval)
@@ -506,7 +504,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
 done:
 	return retval;
 found:
-	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm, first_frag, pos);
 	if (retval)
 		retval->msg_flags |= MSG_EOR;
 	goto done;
@@ -539,14 +538,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -566,7 +570,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 	 * further.
 	 */
 done:
-	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					&ulpq->reasm, first_frag, last_frag);
 	if (retval && is_last)
 		retval->msg_flags |= MSG_EOR;
 
@@ -649,6 +654,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 			} else
 				goto done;
 			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
 		default:
 			return NULL;
 		}
@@ -658,7 +671,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 	 * further.
 	 */
 done:
-	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					&ulpq->reasm, first_frag, last_frag);
 	return retval;
 }
 
@@ -717,7 +731,7 @@ static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 
 	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
 		/* Do ordering if needed.  */
-		if ((event) && (event->msg_flags & MSG_EOR)){
+		if ((event) && (event->msg_flags & MSG_EOR)) {
 			skb_queue_head_init(&temp);
 			__skb_queue_tail(&temp, sctp_event2skb(event));
 
@@ -743,11 +757,9 @@ static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
 	struct sctp_stream *in;
-	__u16 sid, csid;
-	__u16 ssn, cssn;
+	__u16 sid, csid, cssn;
 
 	sid = event->stream;
-	ssn = event->ssn;
 	in  = &ulpq->asoc->ssnmap->in;
 
 	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
@@ -961,20 +973,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
+	__u32 tsn, last_tsn;
+	struct sk_buff *skb, *flist, *last;
 	struct sctp_ulpevent *event;
 	struct sctp_tsnmap *tsnmap;
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(list)) != NULL) {
-		freed += skb_headlen(skb);
+	while ((skb = skb_peek_tail(list)) != NULL) {
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
 
+		/* Don't renege below the Cumulative TSN ACK Point. */
+		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+			break;
+
+		/* Events in ordering queue may have multiple fragments
+		 * corresponding to additional TSNs.  Sum the total
+		 * freed space; find the last TSN.
+		 */
+		freed += skb_headlen(skb);
+		flist = skb_shinfo(skb)->frag_list;
+		for (last = flist; flist; flist = flist->next) {
+			last = flist;
+			freed += skb_headlen(last);
+		}
+		if (last)
+			last_tsn = sctp_skb2event(last)->tsn;
+		else
+			last_tsn = tsn;
+
+		/* Unlink the event, then renege all applicable TSNs. */
+		__skb_unlink(skb, list);
 		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
+		while (TSN_lte(tsn, last_tsn)) {
+			sctp_tsnmap_renege(tsnmap, tsn);
+			tsn++;
+		}
 		if (freed >= needed)
 			return freed;
 	}
@@ -996,22 +1031,33 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
 
 /* Partial deliver the first message as there is pressure on rwnd. */
 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
-				struct sctp_chunk *chunk,
 				gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;
 
 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);
 
 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;
 
+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1100,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);
@@ -1089,5 +1139,5 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 
 	/* If there is data waiting, send it up the socket now. */
 	if (sctp_ulpq_clear_pd(ulpq) || ev)
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 }
