author     Neil Horman <nhorman@tuxdriver.com>      2006-05-05 17:02:09 -0700
committer  Chris Wright <chrisw@sous-sol.org>       2006-05-09 12:53:21 -0700
commit     2e2a2cd09dd7b3fbc99a1879a54090fd6db16f0c (patch)
tree       0ec7e0b342f783c4a6ebe9ed083ddec41182041f /net/sctp
parent     cb19baa0bb7a4064e6d0c99e8f479673120a9f28 (diff)
[PATCH] SCTP: Allow spillover of receive buffer to avoid deadlock. (CVE-2006-2275)
This patch fixes a deadlock situation in the receive path by allowing
temporary spillover of the receive buffer.
- If the chunk we receive has a TSN that immediately follows the CTSN,
accept it even if we have run out of receive buffer space, and renege
data with higher TSNs to make room.
- Once we accept one chunk in a packet, accept all the remaining chunks
even if we run out of receive buffer space (see the sketch after the
sign-offs below).
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Mark Butler <butlerm@middle.net>
Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
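
As a rough illustration of the two rules above, the sketch below models the
accept/drop decision this patch adds to sctp_eat_data(). It is a standalone
simplification, not kernel code: the struct fields, type names, and the
eat_data_decision() helper are stand-ins for the real sctp_association,
sctp_chunk, and tsnmap state.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel state involved; names here are
 * illustrative, not the real sctp_association/sctp_chunk layouts. */
struct assoc_model {
	uint32_t ctsn;          /* cumulative TSN ack point */
	long     rmem_alloc;    /* receive memory charged so far */
	long     rcvbuf_limit;  /* sk->sk_rcvbuf equivalent */
};

struct chunk_model {
	uint32_t tsn;           /* TSN of the arriving DATA chunk */
	bool     data_accepted; /* an earlier chunk from this skb was kept */
};

enum verdict { ACCEPT, ACCEPT_AND_RENEGE, IGNORE_TSN };

/* Models the decision the patch adds: once one chunk from a packet is
 * accepted, take the rest (the skb's memory is pinned anyway), and even
 * over the buffer limit, always admit the TSN that immediately follows
 * the CTSN so the receive queue can keep draining. */
static enum verdict eat_data_decision(const struct assoc_model *a,
				      const struct chunk_model *c)
{
	if (c->data_accepted)
		return ACCEPT;          /* rest of an already-charged skb */

	if (a->rmem_alloc <= a->rcvbuf_limit)
		return ACCEPT;          /* buffer space still available */

	if (c->tsn != a->ctsn + 1)
		return IGNORE_TSN;      /* over limit, not next in sequence */

	/* Next-in-sequence chunk under memory pressure: accept it, but
	 * renege data with higher TSNs later to make room. */
	return ACCEPT_AND_RENEGE;
}

In the real function the ACCEPT_AND_RENEGE case is expressed by setting
rcvbuf_over, which later forces the SCTP_CMD_RENEGE path when the chunk is
queued.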
Diffstat (limited to 'net/sctp')
-rw-r--r--  net/sctp/inqueue.c      |  1
-rw-r--r--  net/sctp/sm_statefuns.c | 46
2 files changed, 37 insertions, 10 deletions
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 297b8951463..cf0c767d43a 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -149,6 +149,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 		/* This is the first chunk in the packet.  */
 		chunk->singleton = 1;
 		ch = (sctp_chunkhdr_t *) chunk->skb->data;
+		chunk->data_accepted = 0;
 	}
 
 	chunk->chunk_hdr = ch;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index db2c0e74afd..8cdba51ec07 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5154,7 +5154,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	int tmp;
 	__u32 tsn;
 	int account_value;
+	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
 	struct sock *sk = asoc->base.sk;
+	int rcvbuf_over = 0;
 
 	data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
 	skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5165,10 +5167,16 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	/* ASSERT:  Now skb->data is really the user data.  */
 
 	/*
-	 * if we are established, and we have used up our receive
-	 * buffer memory, drop the frame
-	 */
-	if (asoc->state == SCTP_STATE_ESTABLISHED) {
+	 * If we are established, and we have used up our receive buffer
+	 * memory, think about droping the frame.
+	 * Note that we have an opportunity to improve performance here.
+	 * If we accept one chunk from an skbuff, we have to keep all the
+	 * memory of that skbuff around until the chunk is read into user
+	 * space. Therefore, once we accept 1 chunk we may as well accept all
+	 * remaining chunks in the skbuff. The data_accepted flag helps us do
+	 * that.
+	 */
+	if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) {
 		/*
 		 * If the receive buffer policy is 1, then each
 		 * association can allocate up to sk_rcvbuf bytes
@@ -5179,9 +5187,25 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 			account_value = atomic_read(&asoc->rmem_alloc);
 		else
 			account_value = atomic_read(&sk->sk_rmem_alloc);
-
-		if (account_value > sk->sk_rcvbuf)
-			return SCTP_IERROR_IGNORE_TSN;
+		if (account_value > sk->sk_rcvbuf) {
+			/*
+			 * We need to make forward progress, even when we are
+			 * under memory pressure, so we always allow the
+			 * next tsn after the ctsn ack point to be accepted.
+			 * This lets us avoid deadlocks in which we have to
+			 * drop frames that would otherwise let us drain the
+			 * receive queue.
+			 */
+			if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn)
+				return SCTP_IERROR_IGNORE_TSN;
+
+			/*
+			 * We're going to accept the frame but we should renege
+			 * to make space for it. This will send us down that
+			 * path later in this function.
+			 */
+			rcvbuf_over = 1;
+		}
 	}
 
 	/* Process ECN based congestion.
@@ -5229,6 +5253,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	datalen -= sizeof(sctp_data_chunk_t);
 
 	deliver = SCTP_CMD_CHUNK_ULP;
+	chunk->data_accepted = 1;
 
 	/* Think about partial delivery. */
 	if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
@@ -5245,7 +5270,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * large spill over.
 	 */
 	if (!asoc->rwnd || asoc->rwnd_over ||
-	    (datalen > asoc->rwnd + asoc->frag_point)) {
+	    (datalen > asoc->rwnd + asoc->frag_point) ||
+	    rcvbuf_over) {
 
 		/* If this is the next TSN, consider reneging to make
 		 * room.   Note: Playing nice with a confused sender.  A
@@ -5253,8 +5279,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 		 * space and in the future we may want to detect and
 		 * do more drastic reneging.
 		 */
-		if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
-		    (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+		if (sctp_tsnmap_has_gap(map) &&
+		    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
 			SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
 			deliver = SCTP_CMD_RENEGE;
 		} else {
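
For readers unfamiliar with the tsnmap helpers in the final hunk:
sctp_tsnmap_get_ctsn() returns the cumulative TSN ack point, and
sctp_tsnmap_has_gap() reports whether out-of-order TSNs are held beyond that
point, i.e. whether there is anything worth reneging. A toy model behaves
roughly as below; the field names are hypothetical, and the kernel's real
sctp_tsnmap is bitmap-based.

#include <stdbool.h>
#include <stdint.h>

/* Toy model of the tsnmap state. The kernel uses serial-number arithmetic
 * that handles 32-bit TSN wraparound, which this sketch ignores. */
struct tsnmap_model {
	uint32_t cumulative_tsn_ack_point; /* highest TSN received in order */
	uint32_t max_tsn_seen;             /* highest TSN received at all */
};

static uint32_t model_get_ctsn(const struct tsnmap_model *m)
{
	return m->cumulative_tsn_ack_point;
}

static bool model_has_gap(const struct tsnmap_model *m)
{
	/* A gap exists when some TSN beyond the ack point has arrived out
	 * of order; only then is there queued data worth reneging. */
	return m->max_tsn_seen != m->cumulative_tsn_ack_point;
}

This is why the renege branch only fires when the map has a gap: reneging
helps only if out-of-order data above the CTSN is occupying buffer space
that the in-sequence chunk needs.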