author    Thomas Graf <tgraf@infradead.org>             2011-07-08 04:37:46 +0000
committer Paul Gortmaker <paul.gortmaker@windriver.com> 2013-01-16 16:45:06 -0500
commit    3f1562900a805d9555b832c6224caafbc2c6f068 (patch)
tree      6dd816f7fa30a9456328c146c9dc409aff524766 /net
parent    6481bb37fc7ad1f4772014c97f7e054ae9a08981 (diff)
sctp: ABORT if receive, reassembly, or reordering queue is not empty while closing socket
commit cd4fcc704f30f2064ab30b5300d44d431e46db50 upstream.

Trigger a user ABORT if the application closes a socket which still has data
queued on the socket receive queue, or chunks waiting on the reassembly or
ordering queue, as this would imply data being lost, which defeats the point
of a graceful shutdown. This behavior is already practiced in TCP.

We do not check the input queue, because that would mean parsing all chunks
on it to look for unacknowledged data, which seems like too much effort.
Control chunks or duplicated chunks may also be in the input queue and should
not stop a graceful shutdown.

Signed-off-by: Thomas Graf <tgraf@infradead.org>
Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
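To make the new behavior concrete, here is a minimal userspace sketch (not
part of the patch; the port number, timing, and overall setup are
illustrative assumptions): closing the receiving end of a one-to-one
(TCP-style) SCTP socket while data is still queued should now abort the
association, which the sender observes as ECONNRESET rather than a graceful
shutdown.

/*
 * Illustrative sketch only. Assumes a kernel with SCTP support and this
 * patch applied; error handling is omitted for brevity.
 */
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_INET,
				 .sin_port = htons(9999),
				 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	char buf[64];
	int lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	int cfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	bind(lfd, (struct sockaddr *)&a, sizeof(a));
	listen(lfd, 1);
	connect(cfd, (struct sockaddr *)&a, sizeof(a));

	int afd = accept(lfd, NULL, NULL);

	send(cfd, "unread", 6, 0);	/* queue data the receiver never reads */
	sleep(1);			/* let the DATA chunk reach afd's receive queue */
	close(afd);			/* unread data -> user ABORT, not graceful SHUTDOWN */
	sleep(1);

	if (recv(cfd, buf, sizeof(buf), 0) < 0 && errno == ECONNRESET)
		printf("peer aborted the association: ECONNRESET\n");
	return 0;
}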
Diffstat (limited to 'net')
-rw-r--r--	net/sctp/socket.c	| 13
-rw-r--r--	net/sctp/ulpevent.c	| 16
2 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 22631c11a19..03daceb2d9a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1373,6 +1373,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 	struct sctp_endpoint *ep;
 	struct sctp_association *asoc;
 	struct list_head *pos, *temp;
+	unsigned int data_was_unread;
 
 	SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
@@ -1382,6 +1383,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
 	ep = sctp_sk(sk)->ep;
 
+	/* Clean up any skbs sitting on the receive queue.  */
+	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
 	/* Walk all associations on an endpoint.  */
 	list_for_each_safe(pos, temp, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1399,7 +1404,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			}
 		}
 
-		if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+		    !skb_queue_empty(&asoc->ulpq.reasm) ||
+		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
 			struct sctp_chunk *chunk;
 
 			chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1409,10 +1416,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			sctp_primitive_SHUTDOWN(asoc, NULL);
 	}
 
-	/* Clean up any skbs sitting on the receive queue.  */
-	sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
-	sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
 	/* On a TCP-style socket, block for at most linger_time if set. */
 	if (sctp_style(sk, TCP) && timeout)
 		sctp_wait_for_close(sk, timeout);
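For comparison, the pre-existing branch of the condition above is the classic
abortive-close knob; a minimal sketch using the standard sockets API (the
helper name abortive_close is hypothetical):

#include <sys/socket.h>
#include <unistd.h>

/*
 * Sketch only: SO_LINGER with l_onoff set and l_linger == 0 is the
 * long-standing way to request an abortive close; this patch makes
 * unread data another trigger for the same ABORT path.
 */
static void abortive_close(int fd)
{
	struct linger lg = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
	close(fd);	/* sctp_close() takes the sctp_make_abort_user() branch */
}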
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index aa72e89c3ee..3b81c62f968 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -1053,9 +1053,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
 }
 
 /* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
-		sctp_ulpevent_free(sctp_skb2event(skb));
+	unsigned int data_unread = 0;
+
+	while ((skb = skb_dequeue(list)) != NULL) {
+		struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+		if (!sctp_ulpevent_is_notification(event))
+			data_unread += skb->len;
+
+		sctp_ulpevent_free(event);
+	}
+
+	return data_unread;
 }
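For reference, the new accounting relies on sctp_ulpevent_is_notification()
to skip event notifications so that only user data counts as unread. A sketch
of that helper, as assumed from kernel trees of this era (not part of this
diff):

/*
 * Assumed sketch: a ulpevent carries a notification (not user data) when
 * MSG_NOTIFICATION is set in its msg_flags, so only non-notification skbs
 * contribute to data_unread above.
 */
static inline int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event)
{
	return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION);
}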