path: root/net/sctp/socket.c
author	Eric Dumazet <eric.dumazet@gmail.com>	2010-04-20 13:03:51 +0000
committer	David S. Miller <davem@davemloft.net>	2010-04-20 16:37:13 -0700
commit	aa395145165cb06a0d0885221bbe0ce4a564391d (patch)
tree	118b0403621f10db8dc3dbf12079f9af5b19e05d /net/sctp/socket.c
parent	ab9304717f7624c41927f442e6b6d418b2d8b3e4 (diff)
net: sk_sleep() helper
Define a new function to return the waitqueue of a "struct sock":

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	return sk->sk_sleep;
}

Replace all read occurrences of sk->sk_sleep with a call to this function.
This is needed for a future RCU conversion, after which sk_sleep won't be a
directly accessible field.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
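For context, here is a minimal userspace sketch of the accessor pattern this patch applies. It is not kernel code: the struct layout, wait_queue_head stand-in, and poll_wait_stub() are simplified illustrations, not the real <net/sock.h> definitions. The point it shows is that once every reader goes through sk_sleep() instead of dereferencing sk->sk_sleep directly, the field's storage can later change (for example, move behind an RCU-protected structure) without touching any call site.

#include <stdio.h>

struct wait_queue_head { int waiters; };          /* stand-in for wait_queue_head_t */

struct sock {
	struct wait_queue_head *sk_sleep;         /* today: a plain pointer field */
};

/* The helper: readers call this instead of touching the field directly. */
static inline struct wait_queue_head *sk_sleep(struct sock *sk)
{
	return sk->sk_sleep;
}

/* Hypothetical stand-in for poll_wait(): just records that a waiter queued. */
static void poll_wait_stub(struct wait_queue_head *wq)
{
	if (wq)
		wq->waiters++;
}

int main(void)
{
	struct wait_queue_head wq = { 0 };
	struct sock sk = { .sk_sleep = &wq };

	/* Old style: poll_wait_stub(sk.sk_sleep);  -- direct field read        */
	/* New style: go through the accessor, as the patch does throughout.    */
	poll_wait_stub(sk_sleep(&sk));

	printf("waiters: %d\n", wq.waiters);      /* prints 1 */
	return 0;
}

If sk_sleep later needs to read the waitqueue out of an RCU-protected object, only the accessor body changes; the converted call sites in the diff below stay exactly as they are.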
Diffstat (limited to 'net/sctp/socket.c')
-rw-r--r--	net/sctp/socket.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c1941276f6e..f34adcca8a8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5702,7 +5702,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sctp_sock *sp = sctp_sk(sk);
unsigned int mask;
- poll_wait(file, sk->sk_sleep, wait);
+ poll_wait(file, sk_sleep(sk), wait);
/* A TCP-style listening socket becomes readable when the accept queue
* is not empty.
@@ -5943,7 +5943,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
int error;
DEFINE_WAIT(wait);
- prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
@@ -5980,14 +5980,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
sctp_lock_sock(sk);
ready:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return 0;
interrupted:
error = sock_intr_errno(*timeo_p);
out:
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
*err = error;
return error;
}
@@ -6061,8 +6061,8 @@ static void __sctp_write_space(struct sctp_association *asoc)
wake_up_interruptible(&asoc->wait);
if (sctp_writeable(sk)) {
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
@@ -6296,7 +6296,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
for (;;) {
- prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (list_empty(&ep->asocs)) {
@@ -6322,7 +6322,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
break;
}
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
return err;
}
@@ -6332,7 +6332,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
DEFINE_WAIT(wait);
do {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (list_empty(&sctp_sk(sk)->ep->asocs))
break;
sctp_release_sock(sk);
@@ -6340,7 +6340,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
sctp_lock_sock(sk);
} while (!signal_pending(current) && timeout);
- finish_wait(sk->sk_sleep, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)