Diffstat (limited to 'net/unix')
-rw-r--r--  net/unix/Kconfig            |    7
-rw-r--r--  net/unix/Makefile           |    3
-rw-r--r--  net/unix/af_unix.c          | 1536
-rw-r--r--  net/unix/diag.c             |  327
-rw-r--r--  net/unix/garbage.c          |  378
-rw-r--r--  net/unix/sysctl_net_unix.c  |   61
6 files changed, 1570 insertions(+), 742 deletions(-)
diff --git a/net/unix/Kconfig b/net/unix/Kconfig
index 5a69733bcda..8b31ab85d05 100644
--- a/net/unix/Kconfig
+++ b/net/unix/Kconfig
@@ -19,3 +19,10 @@ config UNIX
Say Y unless you know what you are doing.
+config UNIX_DIAG
+ tristate "UNIX: socket monitoring interface"
+ depends on UNIX
+ default n
+ ---help---
+ Support for UNIX socket monitoring interface used by the ss tool.
+ If unsure, say Y.
diff --git a/net/unix/Makefile b/net/unix/Makefile
index b852a2bde9a..b663c607b1c 100644
--- a/net/unix/Makefile
+++ b/net/unix/Makefile
@@ -6,3 +6,6 @@ obj-$(CONFIG_UNIX) += unix.o
unix-y := af_unix.o garbage.o
unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o
+
+obj-$(CONFIG_UNIX_DIAG) += unix_diag.o
+unix_diag-y := diag.o
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 41feca3bef8..e9688438073 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1,15 +1,13 @@
/*
* NET4: Implementation of BSD Unix domain sockets.
*
- * Authors: Alan Cox, <alan.cox@linux.org>
+ * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
- *
* Fixes:
* Linus Torvalds : Assorted bug cures.
* Niibe Yutaka : async I/O support.
@@ -82,8 +80,9 @@
* with BSD names.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
@@ -104,6 +103,7 @@
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
@@ -112,31 +112,59 @@
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
-#include <linux/smp_lock.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
+#include <linux/freezer.h>
+
+struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
+EXPORT_SYMBOL_GPL(unix_socket_table);
+DEFINE_SPINLOCK(unix_table_lock);
+EXPORT_SYMBOL_GPL(unix_table_lock);
+static atomic_long_t unix_nr_socks;
-int sysctl_unix_max_dgram_qlen = 10;
-struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
-DEFINE_RWLOCK(unix_table_lock);
-static atomic_t unix_nr_socks = ATOMIC_INIT(0);
+static struct hlist_head *unix_sockets_unbound(void *addr)
+{
+ unsigned long hash = (unsigned long)addr;
+
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
+ hash %= UNIX_HASH_SIZE;
+ return &unix_socket_table[UNIX_HASH_SIZE + hash];
+}
-#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
+#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
-#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
+#ifdef CONFIG_SECURITY_NETWORK
+static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
+}
+
+static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ scm->secid = *UNIXSID(skb);
+}
+#else
+static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{ }
+
+static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+{ }
+#endif /* CONFIG_SECURITY_NETWORK */
/*
* SMP locking strategy:
- * hash table is protected with rwlock unix_table_lock
- * each socket state is protected by separate rwlock.
+ * hash table is protected with spinlock unix_table_lock
+ * each socket state is protected by separate spin lock.
*/
-static inline unsigned unix_hash_fold(unsigned hash)
+static inline unsigned int unix_hash_fold(__wsum n)
{
- hash ^= hash>>16;
+ unsigned int hash = (__force unsigned int)csum_fold(n);
+
hash ^= hash>>8;
return hash&(UNIX_HASH_SIZE-1);
}
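/*
 * [Aside, not part of the patch: a self-contained userspace sketch of
 * the fold above, assuming the kernel's UNIX_HASH_SIZE of 256. The
 * csum_fold() result is a 16-bit value; XOR-ing the high byte into the
 * low byte and masking yields the hash-table index.]
 */
#include <stdio.h>

#define UNIX_HASH_SIZE 256		/* assumption: matches af_unix.h */

static unsigned int fold_demo(unsigned int csum16)
{
	unsigned int hash = csum16;	/* csum_fold() yields 0..0xffff */

	hash ^= hash >> 8;		/* mix the high byte into the low */
	return hash & (UNIX_HASH_SIZE - 1);
}

int main(void)
{
	printf("%u\n", fold_demo(0xabcd));	/* prints an index < 256 */
	return 0;
}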
@@ -150,20 +178,26 @@ static inline int unix_our_peer(struct sock *sk, struct sock *osk)
static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
- return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
+ return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
+}
+
+static inline int unix_recvq_full(struct sock const *sk)
+{
+ return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
-static struct sock *unix_peer_get(struct sock *s)
+struct sock *unix_peer_get(struct sock *s)
{
struct sock *peer;
- unix_state_rlock(s);
+ unix_state_lock(s);
peer = unix_peer(s);
if (peer)
sock_hold(peer);
- unix_state_runlock(s);
+ unix_state_unlock(s);
return peer;
}
+EXPORT_SYMBOL_GPL(unix_peer_get);
static inline void unix_release_addr(struct unix_address *addr)
{
@@ -177,8 +211,8 @@ static inline void unix_release_addr(struct unix_address *addr)
* - if started by not zero, should be NULL terminated (FS object)
* - if started by zero, it is abstract name.
*/
-
-static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
+
+static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
if (len <= sizeof(short) || len > sizeof(*sunaddr))
return -EINVAL;
@@ -188,16 +222,16 @@ static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
/*
* This may look like an off by one error but it is a bit more
* subtle. 108 is the longest valid AF_UNIX path for a binding.
- * sun_path[108] doesnt as such exist. However in kernel space
+ * sun_path[108] doesn't as such exist. However in kernel space
* we are guaranteed that it is a valid memory location in our
* kernel address buffer.
*/
- ((char *)sunaddr)[len]=0;
+ ((char *)sunaddr)[len] = 0;
len = strlen(sunaddr->sun_path)+1+sizeof(short);
return len;
}
- *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
+ *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
return len;
}
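/*
 * [Aside, not part of the patch: the two address flavours unix_mkname()
 * distinguishes, built from userspace. A sketch; names are illustrative
 * and error handling is elided.]
 */
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

/* filesystem object: non-zero first byte, NUL-terminated path */
static socklen_t fs_name(struct sockaddr_un *sun, const char *path)
{
	memset(sun, 0, sizeof(*sun));
	sun->sun_family = AF_UNIX;
	strncpy(sun->sun_path, path, sizeof(sun->sun_path) - 1);
	return offsetof(struct sockaddr_un, sun_path)
		+ strlen(sun->sun_path) + 1;
}

/* abstract name: leading NUL byte, the passed length delimits it */
static socklen_t abstract_name(struct sockaddr_un *sun, const char *name)
{
	size_t n = strlen(name);	/* caller keeps n < sizeof(sun_path) - 1 */

	memset(sun, 0, sizeof(*sun));
	sun->sun_family = AF_UNIX;
	memcpy(sun->sun_path + 1, name, n);	/* sun_path[0] stays 0 */
	return offsetof(struct sockaddr_un, sun_path) + 1 + n;
}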
@@ -208,33 +242,36 @@ static void __unix_remove_socket(struct sock *sk)
static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
- BUG_TRAP(sk_unhashed(sk));
+ WARN_ON(!sk_unhashed(sk));
sk_add_node(sk, list);
}
static inline void unix_remove_socket(struct sock *sk)
{
- write_lock(&unix_table_lock);
+ spin_lock(&unix_table_lock);
__unix_remove_socket(sk);
- write_unlock(&unix_table_lock);
+ spin_unlock(&unix_table_lock);
}
static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
- write_lock(&unix_table_lock);
+ spin_lock(&unix_table_lock);
__unix_insert_socket(list, sk);
- write_unlock(&unix_table_lock);
+ spin_unlock(&unix_table_lock);
}
-static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
- int len, int type, unsigned hash)
+static struct sock *__unix_find_socket_byname(struct net *net,
+ struct sockaddr_un *sunname,
+ int len, int type, unsigned int hash)
{
struct sock *s;
- struct hlist_node *node;
- sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
+ sk_for_each(s, &unix_socket_table[hash ^ type]) {
struct unix_sock *u = unix_sk(s);
+ if (!net_eq(sock_net(s), net))
+ continue;
+
if (u->addr->len == len &&
!memcmp(u->addr->name, sunname, len))
goto found;
@@ -244,39 +281,38 @@ found:
return s;
}
-static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
+static inline struct sock *unix_find_socket_byname(struct net *net,
+ struct sockaddr_un *sunname,
int len, int type,
- unsigned hash)
+ unsigned int hash)
{
struct sock *s;
- read_lock(&unix_table_lock);
- s = __unix_find_socket_byname(sunname, len, type, hash);
+ spin_lock(&unix_table_lock);
+ s = __unix_find_socket_byname(net, sunname, len, type, hash);
if (s)
sock_hold(s);
- read_unlock(&unix_table_lock);
+ spin_unlock(&unix_table_lock);
return s;
}
static struct sock *unix_find_socket_byinode(struct inode *i)
{
struct sock *s;
- struct hlist_node *node;
- read_lock(&unix_table_lock);
- sk_for_each(s, node,
+ spin_lock(&unix_table_lock);
+ sk_for_each(s,
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
- struct dentry *dentry = unix_sk(s)->dentry;
+ struct dentry *dentry = unix_sk(s)->path.dentry;
- if(dentry && dentry->d_inode == i)
- {
+ if (dentry && dentry->d_inode == i) {
sock_hold(s);
goto found;
}
}
s = NULL;
found:
- read_unlock(&unix_table_lock);
+ spin_unlock(&unix_table_lock);
return s;
}
@@ -287,13 +323,17 @@ static inline int unix_writable(struct sock *sk)
static void unix_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
if (unix_writable(sk)) {
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
- sk_wake_async(sk, 2, POLL_OUT);
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait,
+ POLLOUT | POLLWRNORM | POLLWRBAND);
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -323,28 +363,31 @@ static void unix_sock_destructor(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
- BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
- BUG_TRAP(sk_unhashed(sk));
- BUG_TRAP(!sk->sk_socket);
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(!sk_unhashed(sk));
+ WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
- printk("Attempt to release alive unix socket: %p\n", sk);
+ pr_info("Attempt to release alive unix socket: %p\n", sk);
return;
}
if (u->addr)
unix_release_addr(u->addr);
- atomic_dec(&unix_nr_socks);
+ atomic_long_dec(&unix_nr_socks);
+ local_bh_disable();
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
- printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
+ pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
+ atomic_long_read(&unix_nr_socks));
#endif
}
-static int unix_release_sock (struct sock *sk, int embrion)
+static void unix_release_sock(struct sock *sk, int embrion)
{
struct unix_sock *u = unix_sk(sk);
- struct dentry *dentry;
- struct vfsmount *mnt;
+ struct path path;
struct sock *skpair;
struct sk_buff *skb;
int state;
@@ -352,33 +395,30 @@ static int unix_release_sock (struct sock *sk, int embrion)
unix_remove_socket(sk);
/* Clear state */
- unix_state_wlock(sk);
+ unix_state_lock(sk);
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
- dentry = u->dentry;
- u->dentry = NULL;
- mnt = u->mnt;
- u->mnt = NULL;
+ path = u->path;
+ u->path.dentry = NULL;
+ u->path.mnt = NULL;
state = sk->sk_state;
sk->sk_state = TCP_CLOSE;
- unix_state_wunlock(sk);
+ unix_state_unlock(sk);
wake_up_interruptible_all(&u->peer_wait);
- skpair=unix_peer(sk);
+ skpair = unix_peer(sk);
- if (skpair!=NULL) {
+ if (skpair != NULL) {
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
- unix_state_wlock(skpair);
+ unix_state_lock(skpair);
/* No more writes */
skpair->sk_shutdown = SHUTDOWN_MASK;
if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
skpair->sk_err = ECONNRESET;
- unix_state_wunlock(skpair);
+ unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
- read_lock(&skpair->sk_callback_lock);
- sk_wake_async(skpair,1,POLL_HUP);
- read_unlock(&skpair->sk_callback_lock);
+ sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
}
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
@@ -387,23 +427,21 @@ static int unix_release_sock (struct sock *sk, int embrion)
/* Try to flush out this socket. Throw out buffers at least */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- if (state==TCP_LISTEN)
+ if (state == TCP_LISTEN)
unix_release_sock(skb->sk, 1);
/* passed fds are erased in the kfree_skb hook */
kfree_skb(skb);
}
- if (dentry) {
- dput(dentry);
- mntput(mnt);
- }
+ if (path.dentry)
+ path_put(&path);
sock_put(sk);
/* ---- Socket is dead now and most probably destroyed ---- */
/*
- * Fixme: BSD difference: In BSD all sockets connected to use get
+ * Fixme: BSD difference: In BSD all sockets connected to us get
* ECONNRESET and we die on the spot. In Linux we behave
* like files and pipes do and wait for the last
* dereference.
@@ -413,10 +451,26 @@ static int unix_release_sock (struct sock *sk, int embrion)
* What the above comment does talk about? --ANK(980817)
*/
- if (atomic_read(&unix_tot_inflight))
- unix_gc(); /* Garbage collect fds */
+ if (unix_tot_inflight)
+ unix_gc(); /* Garbage collect fds */
+}
- return 0;
+static void init_peercred(struct sock *sk)
+{
+ put_pid(sk->sk_peer_pid);
+ if (sk->sk_peer_cred)
+ put_cred(sk->sk_peer_cred);
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ sk->sk_peer_cred = get_current_cred();
+}
+
+static void copy_peercred(struct sock *sk, struct sock *peersk)
+{
+ put_pid(sk->sk_peer_pid);
+ if (sk->sk_peer_cred)
+ put_cred(sk->sk_peer_cred);
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
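/*
 * [Aside, not part of the patch: the userspace-visible effect of
 * init_peercred()/copy_peercred() is what SO_PEERCRED reports on a
 * connected AF_UNIX socket. A sketch; _GNU_SOURCE is needed for
 * struct ucred.]
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>

static void print_peer(int fd)
{
	struct ucred cred;
	socklen_t len = sizeof(cred);

	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &len) == 0)
		printf("peer pid=%d uid=%u gid=%u\n",
		       cred.pid, cred.uid, cred.gid);
}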
static int unix_listen(struct socket *sock, int backlog)
@@ -424,14 +478,15 @@ static int unix_listen(struct socket *sock, int backlog)
int err;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
+ struct pid *old_pid = NULL;
err = -EOPNOTSUPP;
- if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
- goto out; /* Only stream/seqpacket sockets accept */
+ if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
+ goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
if (!u->addr)
- goto out; /* No listens on an unbound socket */
- unix_state_wlock(sk);
+ goto out; /* No listens on an unbound socket */
+ unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
goto out_unlock;
if (backlog > sk->sk_max_ack_backlog)
@@ -439,13 +494,12 @@ static int unix_listen(struct socket *sock, int backlog)
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
/* set credentials so connect can copy them */
- sk->sk_peercred.pid = current->tgid;
- sk->sk_peercred.uid = current->euid;
- sk->sk_peercred.gid = current->egid;
+ init_peercred(sk);
err = 0;
out_unlock:
- unix_state_wunlock(sk);
+ unix_state_unlock(sk);
+ put_pid(old_pid);
out:
return err;
}
@@ -458,6 +512,8 @@ static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
+static unsigned int unix_dgram_poll(struct file *, struct socket *,
+ poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
@@ -472,8 +528,24 @@ static int unix_dgram_connect(struct socket *, struct sockaddr *,
int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
struct msghdr *, size_t);
+static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
+ struct msghdr *, size_t, int);
+
+static int unix_set_peek_off(struct sock *sk, int val)
+{
+ struct unix_sock *u = unix_sk(sk);
+
+ if (mutex_lock_interruptible(&u->readlock))
+ return -EINTR;
-static struct proto_ops unix_stream_ops = {
+ sk->sk_peek_off = val;
+ mutex_unlock(&u->readlock);
+
+ return 0;
+}
+
+
+static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
@@ -492,9 +564,10 @@ static struct proto_ops unix_stream_ops = {
.recvmsg = unix_stream_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
+ .set_peek_off = unix_set_peek_off,
};
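/*
 * [Aside, not part of the patch: the .set_peek_off hook wired up above
 * is driven by the SO_PEEK_OFF socket option, introduced alongside this
 * code. A sketch of enabling it so repeated MSG_PEEK reads advance
 * through the queue instead of rereading the head.]
 */
#include <sys/socket.h>

static int enable_peek_off(int fd)
{
	int off = 0;	/* start peeking from the head of the queue */

	return setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
}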
-static struct proto_ops unix_dgram_ops = {
+static const struct proto_ops unix_dgram_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
@@ -503,7 +576,7 @@ static struct proto_ops unix_dgram_ops = {
.socketpair = unix_socketpair,
.accept = sock_no_accept,
.getname = unix_getname,
- .poll = datagram_poll,
+ .poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = sock_no_listen,
.shutdown = unix_shutdown,
@@ -513,9 +586,10 @@ static struct proto_ops unix_dgram_ops = {
.recvmsg = unix_dgram_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
+ .set_peek_off = unix_set_peek_off,
};
-static struct proto_ops unix_seqpacket_ops = {
+static const struct proto_ops unix_seqpacket_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
@@ -524,56 +598,75 @@ static struct proto_ops unix_seqpacket_ops = {
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
- .poll = datagram_poll,
+ .poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = unix_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = unix_seqpacket_sendmsg,
- .recvmsg = unix_dgram_recvmsg,
+ .recvmsg = unix_seqpacket_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
+ .set_peek_off = unix_set_peek_off,
};
static struct proto unix_proto = {
- .name = "UNIX",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct unix_sock),
+ .name = "UNIX",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct unix_sock),
};
-static struct sock * unix_create1(struct socket *sock)
+/*
+ * AF_UNIX sockets do not interact with hardware, hence they
+ * don't trigger interrupts - so it's safe for them to have
+ * bh-unsafe locking for their sk_receive_queue.lock. Split off
+ * this special lock-class by reinitializing the spinlock key:
+ */
+static struct lock_class_key af_unix_sk_receive_queue_lock_key;
+
+static struct sock *unix_create1(struct net *net, struct socket *sock)
{
struct sock *sk = NULL;
struct unix_sock *u;
- if (atomic_read(&unix_nr_socks) >= 2*files_stat.max_files)
+ atomic_long_inc(&unix_nr_socks);
+ if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
goto out;
- sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
+ sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
if (!sk)
goto out;
- atomic_inc(&unix_nr_socks);
-
- sock_init_data(sock,sk);
+ sock_init_data(sock, sk);
+ lockdep_set_class(&sk->sk_receive_queue.lock,
+ &af_unix_sk_receive_queue_lock_key);
sk->sk_write_space = unix_write_space;
- sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
+ sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
sk->sk_destruct = unix_sock_destructor;
u = unix_sk(sk);
- u->dentry = NULL;
- u->mnt = NULL;
- rwlock_init(&u->lock);
- atomic_set(&u->inflight, sock ? 0 : -1);
- init_MUTEX(&u->readsem); /* single task reading lock */
+ u->path.dentry = NULL;
+ u->path.mnt = NULL;
+ spin_lock_init(&u->lock);
+ atomic_long_set(&u->inflight, 0);
+ INIT_LIST_HEAD(&u->link);
+ mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
- unix_insert_socket(unix_sockets_unbound, sk);
+ unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
+ if (sk == NULL)
+ atomic_long_dec(&unix_nr_socks);
+ else {
+ local_bh_disable();
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ local_bh_enable();
+ }
return sk;
}
-static int unix_create(struct socket *sock, int protocol)
+static int unix_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
if (protocol && protocol != PF_UNIX)
return -EPROTONOSUPPORT;
@@ -589,7 +682,7 @@ static int unix_create(struct socket *sock, int protocol)
* nothing uses it.
*/
case SOCK_RAW:
- sock->type=SOCK_DGRAM;
+ sock->type = SOCK_DGRAM;
case SOCK_DGRAM:
sock->ops = &unix_dgram_ops;
break;
@@ -600,7 +693,7 @@ static int unix_create(struct socket *sock, int protocol)
return -ESOCKTNOSUPPORT;
}
- return unix_create1(sock) ? 0 : -ENOMEM;
+ return unix_create1(net, sock) ? 0 : -ENOMEM;
}
static int unix_release(struct socket *sock)
@@ -610,47 +703,59 @@ static int unix_release(struct socket *sock)
if (!sk)
return 0;
+ unix_release_sock(sk, 0);
sock->sk = NULL;
- return unix_release_sock (sk, 0);
+ return 0;
}
static int unix_autobind(struct socket *sock)
{
struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
static u32 ordernum = 1;
- struct unix_address * addr;
+ struct unix_address *addr;
int err;
+ unsigned int retries = 0;
- down(&u->readsem);
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err)
+ return err;
err = 0;
if (u->addr)
goto out;
err = -ENOMEM;
- addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
+ addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
if (!addr)
goto out;
- memset(addr, 0, sizeof(*addr) + sizeof(short) + 16);
addr->name->sun_family = AF_UNIX;
atomic_set(&addr->refcnt, 1);
retry:
addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
- addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
+ addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
- write_lock(&unix_table_lock);
+ spin_lock(&unix_table_lock);
ordernum = (ordernum+1)&0xFFFFF;
- if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
+ if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
addr->hash)) {
- write_unlock(&unix_table_lock);
- /* Sanity yield. It is unusual case, but yet... */
- if (!(ordernum&0xFF))
- yield();
+ spin_unlock(&unix_table_lock);
+ /*
+ * __unix_find_socket_byname() may take long time if many names
+ * are already in use.
+ */
+ cond_resched();
+ /* Give up if all names seems to be in use. */
+ if (retries++ == 0xFFFFF) {
+ err = -ENOSPC;
+ kfree(addr);
+ goto out;
+ }
goto retry;
}
addr->hash ^= sk->sk_type;
@@ -658,75 +763,106 @@ retry:
__unix_remove_socket(sk);
u->addr = addr;
__unix_insert_socket(&unix_socket_table[addr->hash], sk);
- write_unlock(&unix_table_lock);
+ spin_unlock(&unix_table_lock);
err = 0;
-out: up(&u->readsem);
+out: mutex_unlock(&u->readlock);
return err;
}
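/*
 * [Aside, not part of the patch: triggering unix_autobind() from
 * userspace. Binding with only the address family (the
 * "addr_len == sizeof(short)" case in unix_bind()) makes the kernel
 * pick an abstract name of five hex digits, per the sprintf() format
 * above. A sketch; error handling elided.]
 */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	socklen_t len = sizeof(sun);
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

	bind(fd, (struct sockaddr *)&sun, sizeof(sa_family_t));
	getsockname(fd, (struct sockaddr *)&sun, &len);
	printf("autobound to \"\\0%s\"\n", sun.sun_path + 1);
	return 0;
}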
-static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
- int type, unsigned hash, int *error)
+static struct sock *unix_find_other(struct net *net,
+ struct sockaddr_un *sunname, int len,
+ int type, unsigned int hash, int *error)
{
struct sock *u;
- struct nameidata nd;
+ struct path path;
int err = 0;
-
+
if (sunname->sun_path[0]) {
- err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
+ struct inode *inode;
+ err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
if (err)
goto fail;
- err = permission(nd.dentry->d_inode,MAY_WRITE, &nd);
+ inode = path.dentry->d_inode;
+ err = inode_permission(inode, MAY_WRITE);
if (err)
goto put_fail;
err = -ECONNREFUSED;
- if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
+ if (!S_ISSOCK(inode->i_mode))
goto put_fail;
- u=unix_find_socket_byinode(nd.dentry->d_inode);
+ u = unix_find_socket_byinode(inode);
if (!u)
goto put_fail;
if (u->sk_type == type)
- touch_atime(nd.mnt, nd.dentry);
+ touch_atime(&path);
- path_release(&nd);
+ path_put(&path);
- err=-EPROTOTYPE;
+ err = -EPROTOTYPE;
if (u->sk_type != type) {
sock_put(u);
goto fail;
}
} else {
err = -ECONNREFUSED;
- u=unix_find_socket_byname(sunname, len, type, hash);
+ u = unix_find_socket_byname(net, sunname, len, type, hash);
if (u) {
struct dentry *dentry;
- dentry = unix_sk(u)->dentry;
+ dentry = unix_sk(u)->path.dentry;
if (dentry)
- touch_atime(unix_sk(u)->mnt, dentry);
+ touch_atime(&unix_sk(u)->path);
} else
goto fail;
}
return u;
put_fail:
- path_release(&nd);
+ path_put(&path);
fail:
- *error=err;
+ *error = err;
return NULL;
}
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
+{
+ struct dentry *dentry;
+ struct path path;
+ int err = 0;
+ /*
+ * Get the parent directory, calculate the hash for last
+ * component.
+ */
+ dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+ err = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ return err;
+
+ /*
+ * All right, let's create it.
+ */
+ err = security_path_mknod(&path, dentry, mode, 0);
+ if (!err) {
+ err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
+ if (!err) {
+ res->mnt = mntget(path.mnt);
+ res->dentry = dget(dentry);
+ }
+ }
+ done_path_create(&path, dentry);
+ return err;
+}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
- struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
- struct dentry * dentry = NULL;
- struct nameidata nd;
+ struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
+ char *sun_path = sunaddr->sun_path;
int err;
- unsigned hash;
+ unsigned int hash;
struct unix_address *addr;
struct hlist_head *list;
@@ -734,7 +870,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (sunaddr->sun_family != AF_UNIX)
goto out;
- if (addr_len==sizeof(short)) {
+ if (addr_len == sizeof(short)) {
err = unix_autobind(sock);
goto out;
}
@@ -744,7 +880,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
addr_len = err;
- down(&u->readsem);
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err)
+ goto out;
err = -EINVAL;
if (u->addr)
@@ -760,52 +898,32 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
addr->hash = hash ^ sk->sk_type;
atomic_set(&addr->refcnt, 1);
- if (sunaddr->sun_path[0]) {
- unsigned int mode;
- err = 0;
- /*
- * Get the parent directory, calculate the hash for last
- * component.
- */
- err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
- if (err)
- goto out_mknod_parent;
-
- dentry = lookup_create(&nd, 0);
- err = PTR_ERR(dentry);
- if (IS_ERR(dentry))
- goto out_mknod_unlock;
-
- /*
- * All right, let's create it.
- */
- mode = S_IFSOCK |
- (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
- err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
- if (err)
- goto out_mknod_dput;
- up(&nd.dentry->d_inode->i_sem);
- dput(nd.dentry);
- nd.dentry = dentry;
-
+ if (sun_path[0]) {
+ struct path path;
+ umode_t mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current_umask());
+ err = unix_mknod(sun_path, mode, &path);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
+ unix_release_addr(addr);
+ goto out_up;
+ }
addr->hash = UNIX_HASH_SIZE;
- }
-
- write_lock(&unix_table_lock);
-
- if (!sunaddr->sun_path[0]) {
+ hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
+ spin_lock(&unix_table_lock);
+ u->path = path;
+ list = &unix_socket_table[hash];
+ } else {
+ spin_lock(&unix_table_lock);
err = -EADDRINUSE;
- if (__unix_find_socket_byname(sunaddr, addr_len,
+ if (__unix_find_socket_byname(net, sunaddr, addr_len,
sk->sk_type, hash)) {
unix_release_addr(addr);
goto out_unlock;
}
list = &unix_socket_table[addr->hash];
- } else {
- list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
- u->dentry = nd.dentry;
- u->mnt = nd.mnt;
}
err = 0;
@@ -814,31 +932,46 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
__unix_insert_socket(list, sk);
out_unlock:
- write_unlock(&unix_table_lock);
+ spin_unlock(&unix_table_lock);
out_up:
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
out:
return err;
+}
-out_mknod_dput:
- dput(dentry);
-out_mknod_unlock:
- up(&nd.dentry->d_inode->i_sem);
- path_release(&nd);
-out_mknod_parent:
- if (err==-EEXIST)
- err=-EADDRINUSE;
- unix_release_addr(addr);
- goto out_up;
+static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
+{
+ if (unlikely(sk1 == sk2) || !sk2) {
+ unix_state_lock(sk1);
+ return;
+ }
+ if (sk1 < sk2) {
+ unix_state_lock(sk1);
+ unix_state_lock_nested(sk2);
+ } else {
+ unix_state_lock(sk2);
+ unix_state_lock_nested(sk1);
+ }
+}
+
+static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
+{
+ if (unlikely(sk1 == sk2) || !sk2) {
+ unix_state_unlock(sk1);
+ return;
+ }
+ unix_state_unlock(sk1);
+ unix_state_unlock(sk2);
}
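/*
 * [Aside, not part of the patch: the order-by-address idiom above in a
 * self-contained pthread sketch. Taking the lower-addressed lock first
 * rules out ABBA deadlock between two peers; comparing unrelated
 * pointers like this is the same pragmatic trick the kernel uses.]
 */
#include <pthread.h>

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b || !b) {	/* self-connect case, or no second lock */
		pthread_mutex_lock(a);
		return;
	}
	if (a < b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}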
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
struct sock *sk = sock->sk;
- struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
+ struct net *net = sock_net(sk);
+ struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
struct sock *other;
- unsigned hash;
+ unsigned int hash;
int err;
if (addr->sa_family != AF_UNSPEC) {
@@ -851,11 +984,19 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
!unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
goto out;
- other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
+restart:
+ other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
if (!other)
goto out;
- unix_state_wlock(sk);
+ unix_state_double_lock(sk, other);
+
+ /* Apparently VFS overslept socket death. Retry. */
+ if (sock_flag(other, SOCK_DEAD)) {
+ unix_state_double_unlock(sk, other);
+ sock_put(other);
+ goto restart;
+ }
err = -EPERM;
if (!unix_may_send(sk, other))
@@ -870,7 +1011,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
* 1003.1g breaking connected state with AF_UNSPEC
*/
other = NULL;
- unix_state_wlock(sk);
+ unix_state_double_lock(sk, other);
}
/*
@@ -878,20 +1019,20 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
*/
if (unix_peer(sk)) {
struct sock *old_peer = unix_peer(sk);
- unix_peer(sk)=other;
- unix_state_wunlock(sk);
+ unix_peer(sk) = other;
+ unix_state_double_unlock(sk, other);
if (other != old_peer)
unix_dgram_disconnected(sk, old_peer);
sock_put(old_peer);
} else {
- unix_peer(sk)=other;
- unix_state_wunlock(sk);
+ unix_peer(sk) = other;
+ unix_state_double_unlock(sk, other);
}
- return 0;
+ return 0;
out_unlock:
- unix_state_wunlock(sk);
+ unix_state_double_unlock(sk, other);
sock_put(other);
out:
return err;
@@ -907,10 +1048,9 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
sched = !sock_flag(other, SOCK_DEAD) &&
!(other->sk_shutdown & RCV_SHUTDOWN) &&
- (skb_queue_len(&other->sk_receive_queue) >
- other->sk_max_ack_backlog);
+ unix_recvq_full(other);
- unix_state_runlock(other);
+ unix_state_unlock(other);
if (sched)
timeo = schedule_timeout(timeo);
@@ -922,13 +1062,14 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
- struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
+ struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk), *newu, *otheru;
struct sock *newsk = NULL;
struct sock *other = NULL;
struct sk_buff *skb = NULL;
- unsigned hash;
+ unsigned int hash;
int st;
int err;
long timeo;
@@ -938,8 +1079,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
goto out;
addr_len = err;
- if (test_bit(SOCK_PASSCRED, &sock->flags)
- && !u->addr && (err = unix_autobind(sock)) != 0)
+ if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
+ (err = unix_autobind(sock)) != 0)
goto out;
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
@@ -952,7 +1093,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
err = -ENOMEM;
/* create new sock for complete connection */
- newsk = unix_create1(NULL);
+ newsk = unix_create1(sock_net(sk), NULL);
if (newsk == NULL)
goto out;
@@ -963,16 +1104,16 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
restart:
/* Find listening sock. */
- other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
+ other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
if (!other)
goto out;
/* Latch state of peer */
- unix_state_rlock(other);
+ unix_state_lock(other);
/* Apparently VFS overslept socket death. Retry. */
if (sock_flag(other, SOCK_DEAD)) {
- unix_state_runlock(other);
+ unix_state_unlock(other);
sock_put(other);
goto restart;
}
@@ -980,9 +1121,10 @@ restart:
err = -ECONNREFUSED;
if (other->sk_state != TCP_LISTEN)
goto out_unlock;
+ if (other->sk_shutdown & RCV_SHUTDOWN)
+ goto out_unlock;
- if (skb_queue_len(&other->sk_receive_queue) >
- other->sk_max_ack_backlog) {
+ if (unix_recvq_full(other)) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
@@ -994,11 +1136,11 @@ restart:
goto out;
sock_put(other);
goto restart;
- }
+ }
/* Latch our state.
- It is tricky place. We need to grab write lock and cannot
+ It is tricky place. We need to grab our state lock and cannot
drop lock on peer. It is dangerous because deadlock is
possible. Connect to self case and simultaneous
attempt to connect are eliminated by checking socket
@@ -1022,18 +1164,18 @@ restart:
goto out_unlock;
}
- unix_state_wlock(sk);
+ unix_state_lock_nested(sk);
if (sk->sk_state != st) {
- unix_state_wunlock(sk);
- unix_state_runlock(other);
+ unix_state_unlock(sk);
+ unix_state_unlock(other);
sock_put(other);
goto restart;
}
- err = security_unix_stream_connect(sock, other->sk_socket, newsk);
+ err = security_unix_stream_connect(sk, other, newsk);
if (err) {
- unix_state_wunlock(sk);
+ unix_state_unlock(sk);
goto out_unlock;
}
@@ -1043,11 +1185,9 @@ restart:
unix_peer(newsk) = sk;
newsk->sk_state = TCP_ESTABLISHED;
newsk->sk_type = sk->sk_type;
- newsk->sk_peercred.pid = current->tgid;
- newsk->sk_peercred.uid = current->euid;
- newsk->sk_peercred.gid = current->egid;
+ init_peercred(newsk);
newu = unix_sk(newsk);
- newsk->sk_sleep = &newu->peer_wait;
+ RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
@@ -1055,40 +1195,38 @@ restart:
atomic_inc(&otheru->addr->refcnt);
newu->addr = otheru->addr;
}
- if (otheru->dentry) {
- newu->dentry = dget(otheru->dentry);
- newu->mnt = mntget(otheru->mnt);
+ if (otheru->path.dentry) {
+ path_get(&otheru->path);
+ newu->path = otheru->path;
}
/* Set credentials */
- sk->sk_peercred = other->sk_peercred;
+ copy_peercred(sk, other);
- sock_hold(newsk);
- unix_peer(sk) = newsk;
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
+ sock_hold(newsk);
- unix_state_wunlock(sk);
+ smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
+ unix_peer(sk) = newsk;
+
+ unix_state_unlock(sk);
/* take ten and send info to listening sock */
spin_lock(&other->sk_receive_queue.lock);
__skb_queue_tail(&other->sk_receive_queue, skb);
- /* Undo artificially decreased inflight after embrion
- * is installed to listening socket. */
- atomic_inc(&newu->inflight);
spin_unlock(&other->sk_receive_queue.lock);
- unix_state_runlock(other);
- other->sk_data_ready(other, 0);
+ unix_state_unlock(other);
+ other->sk_data_ready(other);
sock_put(other);
return 0;
out_unlock:
if (other)
- unix_state_runlock(other);
+ unix_state_unlock(other);
out:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
if (newsk)
unix_release_sock(newsk, 0);
if (other)
@@ -1098,16 +1236,15 @@ out:
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
- struct sock *ska=socka->sk, *skb = sockb->sk;
+ struct sock *ska = socka->sk, *skb = sockb->sk;
/* Join our sockets back to back */
sock_hold(ska);
sock_hold(skb);
- unix_peer(ska)=skb;
- unix_peer(skb)=ska;
- ska->sk_peercred.pid = skb->sk_peercred.pid = current->tgid;
- ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
- ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
+ unix_peer(ska) = skb;
+ unix_peer(skb) = ska;
+ init_peercred(ska);
+ init_peercred(skb);
if (ska->sk_type != SOCK_DGRAM) {
ska->sk_state = TCP_ESTABLISHED;
@@ -1118,6 +1255,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
return 0;
}
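/*
 * [Aside, not part of the patch: unix_socketpair() is the kernel half
 * of socketpair(2); the minimal userspace counterpart, as a sketch.]
 */
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;
	write(sv[0], "hi", 2);	/* the peers are joined back to back */
	close(sv[0]);
	close(sv[1]);
	return 0;
}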
+static void unix_sock_inherit_flags(const struct socket *old,
+ struct socket *new)
+{
+ if (test_bit(SOCK_PASSCRED, &old->flags))
+ set_bit(SOCK_PASSCRED, &new->flags);
+ if (test_bit(SOCK_PASSSEC, &old->flags))
+ set_bit(SOCK_PASSSEC, &new->flags);
+}
+
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk = sock->sk;
@@ -1126,7 +1272,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
int err;
err = -EOPNOTSUPP;
- if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
+ if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out;
err = -EINVAL;
@@ -1150,10 +1296,11 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
wake_up_interruptible(&unix_sk(sk)->peer_wait);
/* attach accepted sock to socket */
- unix_state_wlock(tsk);
+ unix_state_lock(tsk);
newsock->state = SS_CONNECTED;
+ unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
- unix_state_wunlock(tsk);
+ unix_state_unlock(tsk);
return 0;
out:
@@ -1165,7 +1312,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_
{
struct sock *sk = sock->sk;
struct unix_sock *u;
- struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
+ DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
int err = 0;
if (peer) {
@@ -1180,7 +1327,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_
}
u = unix_sk(sk);
- unix_state_rlock(sk);
+ unix_state_lock(sk);
if (!u->addr) {
sunaddr->sun_family = AF_UNIX;
sunaddr->sun_path[0] = 0;
@@ -1191,7 +1338,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_
*uaddr_len = addr->len;
memcpy(sunaddr, addr->name, *uaddr_len);
}
- unix_state_runlock(sk);
+ unix_state_unlock(sk);
sock_put(sk);
out:
return err;
@@ -1202,18 +1349,19 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
int i;
scm->fp = UNIXCB(skb).fp;
- skb->destructor = sock_wfree;
UNIXCB(skb).fp = NULL;
- for (i=scm->fp->count-1; i>=0; i--)
+ for (i = scm->fp->count-1; i >= 0; i--)
unix_notinflight(scm->fp->fp[i]);
}
-static void unix_destruct_fds(struct sk_buff *skb)
+static void unix_destruct_scm(struct sk_buff *skb)
{
struct scm_cookie scm;
memset(&scm, 0, sizeof(scm));
- unix_detach_fds(&scm, skb);
+ scm.pid = UNIXCB(skb).pid;
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(&scm, skb);
/* Alas, it calls VFS */
/* So fscking what? fput() had been SMP-safe since the last Summer */
@@ -1221,14 +1369,73 @@ static void unix_destruct_fds(struct sk_buff *skb)
sock_wfree(skb);
}
-static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+#define MAX_RECURSION_LEVEL 4
+
+static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
- for (i=scm->fp->count-1; i>=0; i--)
- unix_inflight(scm->fp->fp[i]);
- UNIXCB(skb).fp = scm->fp;
- skb->destructor = unix_destruct_fds;
- scm->fp = NULL;
+ unsigned char max_level = 0;
+ int unix_sock_count = 0;
+
+ for (i = scm->fp->count - 1; i >= 0; i--) {
+ struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+
+ if (sk) {
+ unix_sock_count++;
+ max_level = max(max_level,
+ unix_sk(sk)->recursion_level);
+ }
+ }
+ if (unlikely(max_level > MAX_RECURSION_LEVEL))
+ return -ETOOMANYREFS;
+
+ /*
+ * Need to duplicate file references for the sake of garbage
+ * collection. Otherwise a socket in the fps might become a
+ * candidate for GC while the skb is not yet queued.
+ */
+ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+ if (!UNIXCB(skb).fp)
+ return -ENOMEM;
+
+ if (unix_sock_count) {
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_inflight(scm->fp->fp[i]);
+ }
+ return max_level;
+}
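/*
 * [Aside, not part of the patch: the userspace producer of the fd list
 * that unix_attach_fds() accounts for - passing one descriptor via
 * SCM_RIGHTS. A sketch; error handling elided.]
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_fd(int sock, int fd)
{
	char dummy = '*';	/* SCM_RIGHTS needs at least 1 data byte */
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	char ctrl[CMSG_SPACE(sizeof(int))];
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	memset(ctrl, 0, sizeof(ctrl));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	return sendmsg(sock, &msg, 0);
}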
+
+static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+{
+ int err = 0;
+
+ UNIXCB(skb).pid = get_pid(scm->pid);
+ UNIXCB(skb).uid = scm->creds.uid;
+ UNIXCB(skb).gid = scm->creds.gid;
+ UNIXCB(skb).fp = NULL;
+ if (scm->fp && send_fds)
+ err = unix_attach_fds(scm, skb);
+
+ skb->destructor = unix_destruct_scm;
+ return err;
+}
+
+/*
+ * Some apps rely on write() giving SCM_CREDENTIALS
+ * We include credentials if source or destination socket
+ * asserted SOCK_PASSCRED.
+ */
+static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
+ const struct sock *other)
+{
+ if (UNIXCB(skb).pid)
+ return;
+ if (test_bit(SOCK_PASSCRED, &sock->flags) ||
+ !other->sk_socket ||
+ test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+ UNIXCB(skb).pid = get_pid(task_tgid(current));
+ current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
+ }
}
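/*
 * [Aside, not part of the patch: maybe_add_creds() fires when either
 * end asserted SOCK_PASSCRED; from userspace that is the SO_PASSCRED
 * option. A sketch.]
 */
#include <sys/socket.h>

static int enable_passcred(int fd)
{
	int on = 1;

	return setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
}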
/*
@@ -1240,19 +1447,23 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
- struct sockaddr_un *sunaddr=msg->msg_name;
+ DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
struct sock *other = NULL;
int namelen = 0; /* fake GCC */
int err;
- unsigned hash;
+ unsigned int hash;
struct sk_buff *skb;
long timeo;
struct scm_cookie tmp_scm;
+ int max_level;
+ int data_len = 0;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
- err = scm_send(sock, msg, siocb->scm);
+ wait_for_unix_gc();
+ err = scm_send(sock, msg, siocb->scm, false);
if (err < 0)
return err;
@@ -1273,24 +1484,39 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out;
}
- if (test_bit(SOCK_PASSCRED, &sock->flags)
- && !u->addr && (err = unix_autobind(sock)) != 0)
+ if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
+ && (err = unix_autobind(sock)) != 0)
goto out;
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
- skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
- if (skb==NULL)
+ if (len > SKB_MAX_ALLOC) {
+ data_len = min_t(size_t,
+ len - SKB_MAX_ALLOC,
+ MAX_SKB_FRAGS * PAGE_SIZE);
+ data_len = PAGE_ALIGN(data_len);
+
+ BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
+ }
+
+ skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
+ msg->msg_flags & MSG_DONTWAIT, &err,
+ PAGE_ALLOC_COSTLY_ORDER);
+ if (skb == NULL)
goto out;
- memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
- if (siocb->scm->fp)
- unix_attach_fds(siocb->scm, skb);
+ err = unix_scm_to_skb(siocb->scm, skb, true);
+ if (err < 0)
+ goto out_free;
+ max_level = err + 1;
+ unix_get_secdata(siocb->scm, skb);
- skb->h.raw = skb->data;
- err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
+ skb_put(skb, len - data_len);
+ skb->data_len = data_len;
+ skb->len = len;
+ err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
if (err)
goto out_free;
@@ -1302,13 +1528,19 @@ restart:
if (sunaddr == NULL)
goto out_free;
- other = unix_find_other(sunaddr, namelen, sk->sk_type,
+ other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
hash, &err);
- if (other==NULL)
+ if (other == NULL)
goto out_free;
}
- unix_state_rlock(other);
+ if (sk_filter(other, skb) < 0) {
+ /* Toss the packet but do not return any error to the sender */
+ err = len;
+ goto out_free;
+ }
+
+ unix_state_lock(other);
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
@@ -1318,20 +1550,20 @@ restart:
* Check with 1003.1g - what should
* datagram error
*/
- unix_state_runlock(other);
+ unix_state_unlock(other);
sock_put(other);
err = 0;
- unix_state_wlock(sk);
+ unix_state_lock(sk);
if (unix_peer(sk) == other) {
- unix_peer(sk)=NULL;
- unix_state_wunlock(sk);
+ unix_peer(sk) = NULL;
+ unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
} else {
- unix_state_wunlock(sk);
+ unix_state_unlock(sk);
}
other = NULL;
@@ -1350,9 +1582,7 @@ restart:
goto out_unlock;
}
- if (unix_peer(other) != sk &&
- (skb_queue_len(&other->sk_receive_queue) >
- other->sk_max_ack_backlog)) {
+ if (unix_peer(other) != sk && unix_recvq_full(other)) {
if (!timeo) {
err = -EAGAIN;
goto out_unlock;
@@ -1367,15 +1597,20 @@ restart:
goto restart;
}
+ if (sock_flag(other, SOCK_RCVTSTAMP))
+ __net_timestamp(skb);
+ maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
- unix_state_runlock(other);
- other->sk_data_ready(other, len);
+ if (max_level > unix_sk(other)->recursion_level)
+ unix_sk(other)->recursion_level = max_level;
+ unix_state_unlock(other);
+ other->sk_data_ready(other);
sock_put(other);
scm_destroy(siocb->scm);
return len;
out_unlock:
- unix_state_runlock(other);
+ unix_state_unlock(other);
out_free:
kfree_skb(skb);
out:
@@ -1385,22 +1620,29 @@ out:
return err;
}
-
+/* We use paged skbs for stream sockets, and limit occupancy to 32768
+ * bytes, and a minimum of a full page.
+ */
+#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
+
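/*
 * [Aside, not part of the patch: working the macro through under the
 * assumption of 4 KiB pages, get_order(32768) == 3, so
 * UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768 - the paged part of a stream
 * skb is capped at exactly 32 KiB there, and PAGE_SIZE << n can never
 * fall below one full page.]
 */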
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct sock *other = NULL;
- struct sockaddr_un *sunaddr=msg->msg_name;
- int err,size;
+ int err, size;
struct sk_buff *skb;
- int sent=0;
+ int sent = 0;
struct scm_cookie tmp_scm;
+ bool fds_sent = false;
+ int max_level;
+ int data_len;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
- err = scm_send(sock, msg, siocb->scm);
+ wait_for_unix_gc();
+ err = scm_send(sock, msg, siocb->scm, false);
if (err < 0)
return err;
@@ -1412,9 +1654,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
- sunaddr = NULL;
err = -ENOTCONN;
- other = unix_peer_get(sk);
+ other = unix_peer(sk);
if (!other)
goto out_err;
}
@@ -1422,61 +1663,58 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto pipe_err;
- while(sent < len)
- {
- /*
- * Optimisation for the fact that under 0.01% of X messages typically
- * need breaking up.
- */
-
- size=len-sent;
+ while (sent < len) {
+ size = len - sent;
/* Keep two messages in the pipe so it schedules better */
- if (size > sk->sk_sndbuf / 2 - 64)
- size = sk->sk_sndbuf / 2 - 64;
+ size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
- if (size > SKB_MAX_ALLOC)
- size = SKB_MAX_ALLOC;
-
- /*
- * Grab a buffer
- */
-
- skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
+ /* allow fallback to order-0 allocations */
+ size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
- if (skb==NULL)
- goto out_err;
+ data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
- /*
- * If you pass two values to the sock_alloc_send_skb
- * it tries to grab the large buffer with GFP_NOFS
- * (which can fail easily), and if it fails grab the
- * fallback size buffer which is under a page and will
- * succeed. [Alan]
- */
- size = min_t(int, size, skb_tailroom(skb));
+ data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
- memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
- if (siocb->scm->fp)
- unix_attach_fds(siocb->scm, skb);
+ skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
+ msg->msg_flags & MSG_DONTWAIT, &err,
+ get_order(UNIX_SKB_FRAGS_SZ));
+ if (!skb)
+ goto out_err;
- if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
+ /* Only send the fds in the first buffer */
+ err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto out_err;
+ }
+ max_level = err + 1;
+ fds_sent = true;
+
+ skb_put(skb, size - data_len);
+ skb->data_len = data_len;
+ skb->len = size;
+ err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
+ sent, size);
+ if (err) {
kfree_skb(skb);
goto out_err;
}
- unix_state_rlock(other);
+ unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
goto pipe_err_free;
+ maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
- unix_state_runlock(other);
- other->sk_data_ready(other, size);
- sent+=size;
+ if (max_level > unix_sk(other)->recursion_level)
+ unix_sk(other)->recursion_level = max_level;
+ unix_state_unlock(other);
+ other->sk_data_ready(other);
+ sent += size;
}
- sock_put(other);
scm_destroy(siocb->scm);
siocb->scm = NULL;
@@ -1484,15 +1722,13 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
return sent;
pipe_err_free:
- unix_state_runlock(other);
+ unix_state_unlock(other);
kfree_skb(skb);
pipe_err:
- if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
- send_sig(SIGPIPE,current,0);
+ if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
+ send_sig(SIGPIPE, current, 0);
err = -EPIPE;
out_err:
- if (other)
- sock_put(other);
scm_destroy(siocb->scm);
siocb->scm = NULL;
return sent ? : err;
@@ -1503,7 +1739,7 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
{
int err;
struct sock *sk = sock->sk;
-
+
err = sock_error(sk);
if (err)
return err;
@@ -1516,12 +1752,23 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
return unix_dgram_sendmsg(kiocb, sock, msg, len);
}
-
+
+static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
+}
+
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
- msg->msg_namelen = 0;
if (u->addr) {
msg->msg_namelen = u->addr->len;
memcpy(msg->msg_name, u->addr->name, u->addr->len);
@@ -1539,87 +1786,109 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
int noblock = flags & MSG_DONTWAIT;
struct sk_buff *skb;
int err;
+ int peeked, skip;
err = -EOPNOTSUPP;
if (flags&MSG_OOB)
goto out;
- msg->msg_namelen = 0;
+ err = mutex_lock_interruptible(&u->readlock);
+ if (unlikely(err)) {
+ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
- down(&u->readsem);
+ skip = sk_peek_offset(sk, flags);
- skb = skb_recv_datagram(sk, flags, noblock, &err);
- if (!skb)
+ skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
+ if (!skb) {
+ unix_state_lock(sk);
+ /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
+ if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
+ (sk->sk_shutdown & RCV_SHUTDOWN))
+ err = 0;
+ unix_state_unlock(sk);
goto out_unlock;
+ }
- wake_up_interruptible(&u->peer_wait);
+ wake_up_interruptible_sync_poll(&u->peer_wait,
+ POLLOUT | POLLWRNORM | POLLWRBAND);
if (msg->msg_name)
unix_copy_addr(msg, skb->sk);
- if (size > skb->len)
- size = skb->len;
- else if (size < skb->len)
+ if (size > skb->len - skip)
+ size = skb->len - skip;
+ else if (size < skb->len - skip)
msg->msg_flags |= MSG_TRUNC;
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
+ err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
if (err)
goto out_free;
+ if (sock_flag(sk, SOCK_RCVTSTAMP))
+ __sock_recv_timestamp(msg, sk, skb);
+
if (!siocb->scm) {
siocb->scm = &tmp_scm;
memset(&tmp_scm, 0, sizeof(tmp_scm));
}
- siocb->scm->creds = *UNIXCREDS(skb);
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
+ unix_set_secdata(siocb->scm, skb);
- if (!(flags & MSG_PEEK))
- {
+ if (!(flags & MSG_PEEK)) {
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
- }
- else
- {
+
+ sk_peek_offset_bwd(sk, skb->len);
+ } else {
/* It is questionable: on PEEK we could:
- do not return fds - good, but too simple 8)
- return fds, and do not return them on read (old strategy,
apparently wrong)
- clone fds (I chose it for now, it is the most universal
solution)
-
- POSIX 1003.1g does not actually define this clearly
- at all. POSIX 1003.1g doesn't define a lot of things
- clearly however!
-
+
+ POSIX 1003.1g does not actually define this clearly
+ at all. POSIX 1003.1g doesn't define a lot of things
+ clearly however!
+
*/
+
+ sk_peek_offset_fwd(sk, size);
+
if (UNIXCB(skb).fp)
siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
}
- err = size;
+ err = (flags & MSG_TRUNC) ? skb->len - skip : size;
scm_recv(sock, msg, siocb->scm, flags);
out_free:
- skb_free_datagram(sk,skb);
+ skb_free_datagram(sk, skb);
out_unlock:
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
out:
return err;
}
/*
- * Sleep until data has arrive. But check for races..
+ * Sleep until more data has arrived. But check for races..
*/
-
-static long unix_stream_data_wait(struct sock * sk, long timeo)
+static long unix_stream_data_wait(struct sock *sk, long timeo,
+ struct sk_buff *last)
{
DEFINE_WAIT(wait);
- unix_state_rlock(sk);
+ unix_state_lock(sk);
for (;;) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ if (skb_peek_tail(&sk->sk_receive_queue) != last ||
sk->sk_err ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) ||
@@ -1627,18 +1896,21 @@ static long unix_stream_data_wait(struct sock * sk, long timeo)
break;
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- unix_state_runlock(sk);
- timeo = schedule_timeout(timeo);
- unix_state_rlock(sk);
+ unix_state_unlock(sk);
+ timeo = freezable_schedule_timeout(timeo);
+ unix_state_lock(sk);
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
- finish_wait(sk->sk_sleep, &wait);
- unix_state_runlock(sk);
+ finish_wait(sk_sleep(sk), &wait);
+ unix_state_unlock(sk);
return timeo;
}
-
+static unsigned int unix_skb_len(const struct sk_buff *skb)
+{
+ return skb->len - UNIXCB(skb).consumed;
+}
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
@@ -1648,12 +1920,14 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct scm_cookie tmp_scm;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
- struct sockaddr_un *sunaddr=msg->msg_name;
+ DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
int copied = 0;
+ int noblock = flags & MSG_DONTWAIT;
int check_creds = 0;
int target;
int err = 0;
long timeo;
+ int skip;
err = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
@@ -1664,9 +1938,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
- timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
-
- msg->msg_namelen = 0;
+ timeo = sock_rcvtimeo(sk, noblock);
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
@@ -1677,64 +1949,89 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
memset(&tmp_scm, 0, sizeof(tmp_scm));
}
- down(&u->readsem);
+ err = mutex_lock_interruptible(&u->readlock);
+ if (unlikely(err)) {
+ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
- do
- {
+ do {
int chunk;
- struct sk_buff *skb;
+ struct sk_buff *skb, *last;
- skb = skb_dequeue(&sk->sk_receive_queue);
- if (skb==NULL)
- {
+ unix_state_lock(sk);
+ last = skb = skb_peek(&sk->sk_receive_queue);
+again:
+ if (skb == NULL) {
+ unix_sk(sk)->recursion_level = 0;
if (copied >= target)
- break;
+ goto unlock;
/*
* POSIX 1003.1g mandates this order.
*/
-
- if ((err = sock_error(sk)) != 0)
- break;
+
+ err = sock_error(sk);
+ if (err)
+ goto unlock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- break;
+ goto unlock;
+
+ unix_state_unlock(sk);
err = -EAGAIN;
if (!timeo)
break;
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
- timeo = unix_stream_data_wait(sk, timeo);
+ timeo = unix_stream_data_wait(sk, timeo, last);
- if (signal_pending(current)) {
+ if (signal_pending(current)
+ || mutex_lock_interruptible(&u->readlock)) {
err = sock_intr_errno(timeo);
goto out;
}
- down(&u->readsem);
+
continue;
+ unlock:
+ unix_state_unlock(sk);
+ break;
}
+ skip = sk_peek_offset(sk, flags);
+ while (skip >= unix_skb_len(skb)) {
+ skip -= unix_skb_len(skb);
+ last = skb;
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ if (!skb)
+ goto again;
+ }
+
+ unix_state_unlock(sk);
+
if (check_creds) {
/* Never glue messages from different writers */
- if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
- skb_queue_head(&sk->sk_receive_queue, skb);
+ if ((UNIXCB(skb).pid != siocb->scm->pid) ||
+ !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
+ !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
break;
- }
- } else {
+ } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
/* Copy credentials */
- siocb->scm->creds = *UNIXCREDS(skb);
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
check_creds = 1;
}
/* Copy address just once */
- if (sunaddr)
- {
+ if (sunaddr) {
unix_copy_addr(msg, skb->sk);
sunaddr = NULL;
}
- chunk = min_t(unsigned int, skb->len, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
- skb_queue_head(&sk->sk_receive_queue, skb);
+ chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
+ if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
+ msg->msg_iov, chunk)) {
if (copied == 0)
copied = -EFAULT;
break;
@@ -1743,39 +2040,35 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
size -= chunk;
/* Mark read part of skb as used */
- if (!(flags & MSG_PEEK))
- {
- skb_pull(skb, chunk);
+ if (!(flags & MSG_PEEK)) {
+ UNIXCB(skb).consumed += chunk;
+
+ sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
- /* put the skb back if we didn't use it up.. */
- if (skb->len)
- {
- skb_queue_head(&sk->sk_receive_queue, skb);
+ if (unix_skb_len(skb))
break;
- }
- kfree_skb(skb);
+ skb_unlink(skb, &sk->sk_receive_queue);
+ consume_skb(skb);
if (siocb->scm->fp)
break;
- }
- else
- {
+ } else {
/* It is questionable, see note in unix_dgram_recvmsg.
*/
if (UNIXCB(skb).fp)
siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
- /* put message back and return */
- skb_queue_head(&sk->sk_receive_queue, skb);
+ sk_peek_offset_fwd(sk, chunk);
+
break;
}
} while (size);
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
scm_recv(sock, msg, siocb->scm, flags);
out:
return copied ? : err;
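
The UNIXCB(skb).consumed counter and the sk_peek_offset_fwd()/sk_peek_offset_bwd() calls above are what back the SO_PEEK_OFF socket option for AF_UNIX stream sockets: peeks advance through the queue instead of re-reading the head, and ordinary reads pull the offset back by the amount consumed. A minimal userspace sketch of the observable behaviour (error handling omitted; assumes libc headers that expose SO_PEEK_OFF, which is 42 on Linux):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2], off = 0;
	char buf[4] = "";

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send(sv[0], "abcdef", 6, 0);

	/* Enable peek-offset tracking, starting at byte 0. */
	setsockopt(sv[1], SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));

	recv(sv[1], buf, 3, MSG_PEEK);	/* sees "abc"; offset 0 -> 3 */
	recv(sv[1], buf, 3, MSG_PEEK);	/* sees "def", not "abc" again */
	printf("second peek: %.3s\n", buf);

	recv(sv[1], buf, 2, 0);		/* consumes "ab"; offset 6 -> 4 */
	close(sv[0]);
	close(sv[1]);
	return 0;
}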
@@ -1786,92 +2079,108 @@ static int unix_shutdown(struct socket *sock, int mode)
struct sock *sk = sock->sk;
struct sock *other;
- mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
-
- if (mode) {
- unix_state_wlock(sk);
- sk->sk_shutdown |= mode;
- other=unix_peer(sk);
- if (other)
- sock_hold(other);
- unix_state_wunlock(sk);
- sk->sk_state_change(sk);
-
- if (other &&
- (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
-
- int peer_mode = 0;
-
- if (mode&RCV_SHUTDOWN)
- peer_mode |= SEND_SHUTDOWN;
- if (mode&SEND_SHUTDOWN)
- peer_mode |= RCV_SHUTDOWN;
- unix_state_wlock(other);
- other->sk_shutdown |= peer_mode;
- unix_state_wunlock(other);
- other->sk_state_change(other);
- read_lock(&other->sk_callback_lock);
- if (peer_mode == SHUTDOWN_MASK)
- sk_wake_async(other,1,POLL_HUP);
- else if (peer_mode & RCV_SHUTDOWN)
- sk_wake_async(other,1,POLL_IN);
- read_unlock(&other->sk_callback_lock);
- }
- if (other)
- sock_put(other);
+ if (mode < SHUT_RD || mode > SHUT_RDWR)
+ return -EINVAL;
+ /* This maps:
+ * SHUT_RD (0) -> RCV_SHUTDOWN (1)
+ * SHUT_WR (1) -> SEND_SHUTDOWN (2)
+ * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
+ */
+ ++mode;
+
+ unix_state_lock(sk);
+ sk->sk_shutdown |= mode;
+ other = unix_peer(sk);
+ if (other)
+ sock_hold(other);
+ unix_state_unlock(sk);
+ sk->sk_state_change(sk);
+
+ if (other &&
+ (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
+
+ int peer_mode = 0;
+
+ if (mode&RCV_SHUTDOWN)
+ peer_mode |= SEND_SHUTDOWN;
+ if (mode&SEND_SHUTDOWN)
+ peer_mode |= RCV_SHUTDOWN;
+ unix_state_lock(other);
+ other->sk_shutdown |= peer_mode;
+ unix_state_unlock(other);
+ other->sk_state_change(other);
+ if (peer_mode == SHUTDOWN_MASK)
+ sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
+ else if (peer_mode & RCV_SHUTDOWN)
+ sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
}
+ if (other)
+ sock_put(other);
+
return 0;
}
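
The rewritten unix_shutdown() rejects out-of-range modes up front and, as before, mirrors the shutdown onto the peer, so SHUT_WR on one end is immediately visible as read-side EOF on the other. A minimal sketch of that peer-visible effect (asserts stand in for error handling):

#include <assert.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2];
	char c;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);

	/* SHUT_WR on sv[0] sets SEND_SHUTDOWN locally and RCV_SHUTDOWN
	 * on the peer, so the peer reads EOF immediately. */
	shutdown(sv[0], SHUT_WR);
	assert(read(sv[1], &c, 1) == 0);

	/* The other direction is still open. */
	assert(write(sv[1], "x", 1) == 1);
	assert(read(sv[0], &c, 1) == 1);
	return 0;
}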
+long unix_inq_len(struct sock *sk)
+{
+ struct sk_buff *skb;
+ long amount = 0;
+
+ if (sk->sk_state == TCP_LISTEN)
+ return -EINVAL;
+
+ spin_lock(&sk->sk_receive_queue.lock);
+ if (sk->sk_type == SOCK_STREAM ||
+ sk->sk_type == SOCK_SEQPACKET) {
+ skb_queue_walk(&sk->sk_receive_queue, skb)
+ amount += unix_skb_len(skb);
+ } else {
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
+ amount = skb->len;
+ }
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ return amount;
+}
+EXPORT_SYMBOL_GPL(unix_inq_len);
+
+long unix_outq_len(struct sock *sk)
+{
+ return sk_wmem_alloc_get(sk);
+}
+EXPORT_SYMBOL_GPL(unix_outq_len);
+
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
- long amount=0;
+ long amount = 0;
int err;
- switch(cmd)
- {
- case SIOCOUTQ:
- amount = atomic_read(&sk->sk_wmem_alloc);
- err = put_user(amount, (int __user *)arg);
- break;
- case SIOCINQ:
- {
- struct sk_buff *skb;
-
- if (sk->sk_state == TCP_LISTEN) {
- err = -EINVAL;
- break;
- }
-
- spin_lock(&sk->sk_receive_queue.lock);
- if (sk->sk_type == SOCK_STREAM ||
- sk->sk_type == SOCK_SEQPACKET) {
- skb_queue_walk(&sk->sk_receive_queue, skb)
- amount += skb->len;
- } else {
- skb = skb_peek(&sk->sk_receive_queue);
- if (skb)
- amount=skb->len;
- }
- spin_unlock(&sk->sk_receive_queue.lock);
+ switch (cmd) {
+ case SIOCOUTQ:
+ amount = unix_outq_len(sk);
+ err = put_user(amount, (int __user *)arg);
+ break;
+ case SIOCINQ:
+ amount = unix_inq_len(sk);
+ if (amount < 0)
+ err = amount;
+ else
err = put_user(amount, (int __user *)arg);
- break;
- }
-
- default:
- err = dev_ioctl(cmd, (void __user *)arg);
- break;
+ break;
+ default:
+ err = -ENOIOCTLCMD;
+ break;
}
return err;
}
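
unix_ioctl() now reuses the exported unix_inq_len()/unix_outq_len() helpers shared with the diag module below, and unknown commands return -ENOIOCTLCMD instead of being forwarded to dev_ioctl(). The userspace view is unchanged; a minimal sketch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

int main(void)
{
	int sv[2], inq = 0, outq = 0;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send(sv[0], "hello", 5, 0);

	ioctl(sv[1], SIOCINQ, &inq);	/* unread payload bytes: 5 */
	ioctl(sv[0], SIOCOUTQ, &outq);	/* sk_wmem_alloc is still charged
					 * to the sender, so > 5 (overhead) */
	printf("SIOCINQ=%d SIOCOUTQ=%d\n", inq, outq);
	return 0;
}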
-static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
+static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask;
- poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
@@ -1879,14 +2188,16 @@ static unsigned int unix_poll(struct file * file, struct socket *sock, poll_tabl
mask |= POLLERR;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
- if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
+ if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
+ sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
/*
@@ -1899,54 +2210,151 @@ static unsigned int unix_poll(struct file * file, struct socket *sock, poll_tabl
return mask;
}
+static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk, *other;
+ unsigned int mask, writable;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ /* exceptional events? */
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+ mask |= POLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= POLLHUP;
+
+ /* readable? */
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ /* Connection-based need to check for termination and startup */
+ if (sk->sk_type == SOCK_SEQPACKET) {
+ if (sk->sk_state == TCP_CLOSE)
+ mask |= POLLHUP;
+ /* connection hasn't started yet? */
+ if (sk->sk_state == TCP_SYN_SENT)
+ return mask;
+ }
+
+ /* No write status requested, avoid expensive OUT tests. */
+ if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
+ return mask;
+
+ writable = unix_writable(sk);
+ other = unix_peer_get(sk);
+ if (other) {
+ if (unix_peer(other) != sk) {
+ sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
+ if (unix_recvq_full(other))
+ writable = 0;
+ }
+ sock_put(other);
+ }
+
+ if (writable)
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ else
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+ return mask;
+}
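
unix_dgram_poll() adds a wrinkle unix_poll() does not have: when the peer does not list us back as its own peer (the n:1 connected-datagram case), writability also requires the peer's receive queue to have room, and the waiter is parked on the peer's peer_wait queue so a reader can wake it. A minimal sketch, assuming the default net.unix.max_dgram_qlen and a hypothetical abstract-namespace name; expect poll() to time out and return 0:

#include <poll.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	socklen_t len = offsetof(struct sockaddr_un, sun_path) + 16;
	int srv = socket(AF_UNIX, SOCK_DGRAM, 0);
	int cli = socket(AF_UNIX, SOCK_DGRAM, 0);
	struct pollfd pfd = { .fd = cli, .events = POLLOUT };

	/* Hypothetical abstract-namespace name (leading NUL byte). */
	memcpy(sun.sun_path, "\0dgram-poll-demo", 16);
	bind(srv, (struct sockaddr *)&sun, len);
	connect(cli, (struct sockaddr *)&sun, len);

	/* Fill srv's receive queue up to net.unix.max_dgram_qlen. */
	while (send(cli, "d", 1, MSG_DONTWAIT) == 1)
		;

	/* cli lists srv as its peer but not vice versa, so
	 * unix_dgram_poll() consults unix_recvq_full(srv) and withholds
	 * POLLOUT until a datagram is consumed. */
	printf("poll -> %d\n", poll(&pfd, 1, 100));
	return 0;
}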
#ifdef CONFIG_PROC_FS
-static struct sock *unix_seq_idx(int *iter, loff_t pos)
+
+#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
+
+#define get_bucket(x) ((x) >> BUCKET_SPACE)
+#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
+#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
+
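The macros above pack the seq_file position into a single loff_t: the top bits select a hash bucket, the low BUCKET_SPACE bits hold a 1-based offset within it. A standalone illustration of the encoding, assuming 64-bit longs and UNIX_HASH_BITS == 8, which gives BUCKET_SPACE == 54:

#include <assert.h>

/* Assumed values: BITS_PER_LONG == 64, UNIX_HASH_BITS == 8. */
#define BUCKET_SPACE (64 - (8 + 1) - 1)	/* == 54 */

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

int main(void)
{
	long pos = set_bucket_offset(3L, 7L);

	assert(get_bucket(pos) == 3);	/* hash chain number */
	assert(get_offset(pos) == 7);	/* 1-based index into the chain */
	return 0;
}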
+static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
- loff_t off = 0;
- struct sock *s;
+ unsigned long offset = get_offset(*pos);
+ unsigned long bucket = get_bucket(*pos);
+ struct sock *sk;
+ unsigned long count = 0;
- for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
- if (off == pos)
- return s;
- ++off;
+ for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
+ if (sock_net(sk) != seq_file_net(seq))
+ continue;
+ if (++count == offset)
+ break;
}
- return NULL;
+
+ return sk;
}
+static struct sock *unix_next_socket(struct seq_file *seq,
+ struct sock *sk,
+ loff_t *pos)
+{
+ unsigned long bucket;
+
+ while (sk > (struct sock *)SEQ_START_TOKEN) {
+ sk = sk_next(sk);
+ if (!sk)
+ goto next_bucket;
+ if (sock_net(sk) == seq_file_net(seq))
+ return sk;
+ }
+
+ do {
+ sk = unix_from_bucket(seq, pos);
+ if (sk)
+ return sk;
+
+next_bucket:
+ bucket = get_bucket(*pos) + 1;
+ *pos = set_bucket_offset(bucket, 1);
+ } while (bucket < ARRAY_SIZE(unix_socket_table));
+
+ return NULL;
+}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(unix_table_lock)
{
- read_lock(&unix_table_lock);
- return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
+ spin_lock(&unix_table_lock);
+
+ if (!*pos)
+ return SEQ_START_TOKEN;
+
+ if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
+ return NULL;
+
+ return unix_next_socket(seq, NULL, pos);
}
static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
-
- if (v == (void *)1)
- return first_unix_socket(seq->private);
- return next_unix_socket(seq->private, v);
+ return unix_next_socket(seq, v, pos);
}
static void unix_seq_stop(struct seq_file *seq, void *v)
+ __releases(unix_table_lock)
{
- read_unlock(&unix_table_lock);
+ spin_unlock(&unix_table_lock);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{
-
- if (v == (void *)1)
+
+ if (v == SEQ_START_TOKEN)
seq_puts(seq, "Num RefCount Protocol Flags Type St "
"Inode Path\n");
else {
struct sock *s = v;
struct unix_sock *u = unix_sk(s);
- unix_state_rlock(s);
+ unix_state_lock(s);
- seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
+ seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
s,
atomic_read(&s->sk_refcnt),
0,
@@ -1972,82 +2380,87 @@ static int unix_seq_show(struct seq_file *seq, void *v)
for ( ; i < len; i++)
seq_putc(seq, u->addr->name->sun_path[i]);
}
- unix_state_runlock(s);
+ unix_state_unlock(s);
seq_putc(seq, '\n');
}
return 0;
}
-static struct seq_operations unix_seq_ops = {
+static const struct seq_operations unix_seq_ops = {
.start = unix_seq_start,
.next = unix_seq_next,
.stop = unix_seq_stop,
.show = unix_seq_show,
};
-
static int unix_seq_open(struct inode *inode, struct file *file)
{
- struct seq_file *seq;
- int rc = -ENOMEM;
- int *iter = kmalloc(sizeof(int), GFP_KERNEL);
-
- if (!iter)
- goto out;
-
- rc = seq_open(file, &unix_seq_ops);
- if (rc)
- goto out_kfree;
-
- seq = file->private_data;
- seq->private = iter;
- *iter = 0;
-out:
- return rc;
-out_kfree:
- kfree(iter);
- goto out;
+ return seq_open_net(inode, file, &unix_seq_ops,
+ sizeof(struct seq_net_private));
}
-static struct file_operations unix_seq_fops = {
+static const struct file_operations unix_seq_fops = {
.owner = THIS_MODULE,
.open = unix_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = seq_release_net,
};
#endif
-static struct net_proto_family unix_family_ops = {
+static const struct net_proto_family unix_family_ops = {
.family = PF_UNIX,
.create = unix_create,
.owner = THIS_MODULE,
};
-static int __init af_unix_init(void)
+
+static int __net_init unix_net_init(struct net *net)
{
- int rc = -1;
- struct sk_buff *dummy_skb;
+ int error = -ENOMEM;
- if (sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb)) {
- printk(KERN_CRIT "%s: panic\n", __FUNCTION__);
+ net->unx.sysctl_max_dgram_qlen = 10;
+ if (unix_sysctl_register(net))
+ goto out;
+
+#ifdef CONFIG_PROC_FS
+ if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
+ unix_sysctl_unregister(net);
goto out;
}
+#endif
+ error = 0;
+out:
+ return error;
+}
+
+static void __net_exit unix_net_exit(struct net *net)
+{
+ unix_sysctl_unregister(net);
+ remove_proc_entry("unix", net->proc_net);
+}
+
+static struct pernet_operations unix_net_ops = {
+ .init = unix_net_init,
+ .exit = unix_net_exit,
+};
+
+static int __init af_unix_init(void)
+{
+ int rc = -1;
+
+ BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
rc = proto_register(&unix_proto, 1);
- if (rc != 0) {
- printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
- __FUNCTION__);
+ if (rc != 0) {
+ pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
goto out;
}
sock_register(&unix_family_ops);
-#ifdef CONFIG_PROC_FS
- proc_net_fops_create("unix", 0, &unix_seq_fops);
-#endif
- unix_sysctl_register();
+ register_pernet_subsys(&unix_net_ops);
out:
return rc;
}
@@ -2055,12 +2468,15 @@ out:
static void __exit af_unix_exit(void)
{
sock_unregister(PF_UNIX);
- unix_sysctl_unregister();
- proc_net_remove("unix");
proto_unregister(&unix_proto);
+ unregister_pernet_subsys(&unix_net_ops);
}
-module_init(af_unix_init);
+/* Earlier than device_initcall() so that other drivers invoking
+ request_module() don't end up in a loop when modprobe tries
+ to use a UNIX socket. But later than subsys_initcall() because
+ we depend on stuff initialised there */
+fs_initcall(af_unix_init);
module_exit(af_unix_exit);
MODULE_LICENSE("GPL");
diff --git a/net/unix/diag.c b/net/unix/diag.c
new file mode 100644
index 00000000000..86fa0f3b2ca
--- /dev/null
+++ b/net/unix/diag.c
@@ -0,0 +1,327 @@
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/sock_diag.h>
+#include <linux/unix_diag.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <net/netlink.h>
+#include <net/af_unix.h>
+#include <net/tcp_states.h>
+
+static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct unix_address *addr = unix_sk(sk)->addr;
+
+ if (!addr)
+ return 0;
+
+ return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
+ addr->name->sun_path);
+}
+
+static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct dentry *dentry = unix_sk(sk)->path.dentry;
+
+ if (dentry) {
+ struct unix_diag_vfs uv = {
+ .udiag_vfs_ino = dentry->d_inode->i_ino,
+ .udiag_vfs_dev = dentry->d_sb->s_dev,
+ };
+
+ return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
+ }
+
+ return 0;
+}
+
+static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct sock *peer;
+ int ino;
+
+ peer = unix_peer_get(sk);
+ if (peer) {
+ unix_state_lock(peer);
+ ino = sock_i_ino(peer);
+ unix_state_unlock(peer);
+ sock_put(peer);
+
+ return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
+ }
+
+ return 0;
+}
+
+static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct sk_buff *skb;
+ struct nlattr *attr;
+ u32 *buf;
+ int i;
+
+ if (sk->sk_state == TCP_LISTEN) {
+ spin_lock(&sk->sk_receive_queue.lock);
+
+ attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+ sk->sk_receive_queue.qlen * sizeof(u32));
+ if (!attr)
+ goto errout;
+
+ buf = nla_data(attr);
+ i = 0;
+ skb_queue_walk(&sk->sk_receive_queue, skb) {
+ struct sock *req, *peer;
+
+ req = skb->sk;
+ /*
+ * A socket's state lock nests outside its own
+ * queue lock, so with the listener's queue held
+ * it is safe to take each embryo's state lock.
+ */
+ unix_state_lock_nested(req);
+ peer = unix_sk(req)->peer;
+ buf[i++] = (peer ? sock_i_ino(peer) : 0);
+ unix_state_unlock(req);
+ }
+ spin_unlock(&sk->sk_receive_queue.lock);
+ }
+
+ return 0;
+
+errout:
+ spin_unlock(&sk->sk_receive_queue.lock);
+ return -EMSGSIZE;
+}
+
+static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct unix_diag_rqlen rql;
+
+ if (sk->sk_state == TCP_LISTEN) {
+ rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+ rql.udiag_wqueue = sk->sk_max_ack_backlog;
+ } else {
+ rql.udiag_rqueue = (u32) unix_inq_len(sk);
+ rql.udiag_wqueue = (u32) unix_outq_len(sk);
+ }
+
+ return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+ u32 portid, u32 seq, u32 flags, int sk_ino)
+{
+ struct nlmsghdr *nlh;
+ struct unix_diag_msg *rep;
+
+ nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
+ flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ rep = nlmsg_data(nlh);
+ rep->udiag_family = AF_UNIX;
+ rep->udiag_type = sk->sk_type;
+ rep->udiag_state = sk->sk_state;
+ rep->pad = 0;
+ rep->udiag_ino = sk_ino;
+ sock_diag_save_cookie(sk, rep->udiag_cookie);
+
+ if ((req->udiag_show & UDIAG_SHOW_NAME) &&
+ sk_diag_dump_name(sk, skb))
+ goto out_nlmsg_trim;
+
+ if ((req->udiag_show & UDIAG_SHOW_VFS) &&
+ sk_diag_dump_vfs(sk, skb))
+ goto out_nlmsg_trim;
+
+ if ((req->udiag_show & UDIAG_SHOW_PEER) &&
+ sk_diag_dump_peer(sk, skb))
+ goto out_nlmsg_trim;
+
+ if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
+ sk_diag_dump_icons(sk, skb))
+ goto out_nlmsg_trim;
+
+ if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
+ sk_diag_show_rqlen(sk, skb))
+ goto out_nlmsg_trim;
+
+ if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
+ sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
+ goto out_nlmsg_trim;
+
+ if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+ goto out_nlmsg_trim;
+
+ return nlmsg_end(skb, nlh);
+
+out_nlmsg_trim:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+ u32 portid, u32 seq, u32 flags)
+{
+ int sk_ino;
+
+ unix_state_lock(sk);
+ sk_ino = sock_i_ino(sk);
+ unix_state_unlock(sk);
+
+ if (!sk_ino)
+ return 0;
+
+ return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
+}
+
+static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct unix_diag_req *req;
+ int num, s_num, slot, s_slot;
+ struct net *net = sock_net(skb->sk);
+
+ req = nlmsg_data(cb->nlh);
+
+ s_slot = cb->args[0];
+ num = s_num = cb->args[1];
+
+ spin_lock(&unix_table_lock);
+ for (slot = s_slot;
+ slot < ARRAY_SIZE(unix_socket_table);
+ s_num = 0, slot++) {
+ struct sock *sk;
+
+ num = 0;
+ sk_for_each(sk, &unix_socket_table[slot]) {
+ if (!net_eq(sock_net(sk), net))
+ continue;
+ if (num < s_num)
+ goto next;
+ if (!(req->udiag_states & (1 << sk->sk_state)))
+ goto next;
+ if (sk_diag_dump(sk, skb, req,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) < 0)
+ goto done;
+next:
+ num++;
+ }
+ }
+done:
+ spin_unlock(&unix_table_lock);
+ cb->args[0] = slot;
+ cb->args[1] = num;
+
+ return skb->len;
+}
+
+static struct sock *unix_lookup_by_ino(int ino)
+{
+ int i;
+ struct sock *sk;
+
+ spin_lock(&unix_table_lock);
+ for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
+ sk_for_each(sk, &unix_socket_table[i])
+ if (ino == sock_i_ino(sk)) {
+ sock_hold(sk);
+ spin_unlock(&unix_table_lock);
+
+ return sk;
+ }
+ }
+
+ spin_unlock(&unix_table_lock);
+ return NULL;
+}
+
+static int unix_diag_get_exact(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ struct unix_diag_req *req)
+{
+ int err = -EINVAL;
+ struct sock *sk;
+ struct sk_buff *rep;
+ unsigned int extra_len;
+ struct net *net = sock_net(in_skb->sk);
+
+ if (req->udiag_ino == 0)
+ goto out_nosk;
+
+ sk = unix_lookup_by_ino(req->udiag_ino);
+ err = -ENOENT;
+ if (sk == NULL)
+ goto out_nosk;
+
+ err = sock_diag_check_cookie(sk, req->udiag_cookie);
+ if (err)
+ goto out;
+
+ extra_len = 256;
+again:
+ err = -ENOMEM;
+ rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
+ if (!rep)
+ goto out;
+
+ err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0, req->udiag_ino);
+ if (err < 0) {
+ nlmsg_free(rep);
+ extra_len += 256;
+ if (extra_len >= PAGE_SIZE)
+ goto out;
+
+ goto again;
+ }
+ err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
+ MSG_DONTWAIT);
+ if (err > 0)
+ err = 0;
+out:
+ if (sk)
+ sock_put(sk);
+out_nosk:
+ return err;
+}
+
+static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+ int hdrlen = sizeof(struct unix_diag_req);
+ struct net *net = sock_net(skb->sk);
+
+ if (nlmsg_len(h) < hdrlen)
+ return -EINVAL;
+
+ if (h->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = unix_diag_dump,
+ };
+ return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+ } else
+ return unix_diag_get_exact(skb, h, nlmsg_data(h));
+}
+
+static const struct sock_diag_handler unix_diag_handler = {
+ .family = AF_UNIX,
+ .dump = unix_diag_handler_dump,
+};
+
+static int __init unix_diag_init(void)
+{
+ return sock_diag_register(&unix_diag_handler);
+}
+
+static void __exit unix_diag_exit(void)
+{
+ sock_diag_unregister(&unix_diag_handler);
+}
+
+module_init(unix_diag_init);
+module_exit(unix_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
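
The module serves SOCK_DIAG_BY_FAMILY requests on the NETLINK_SOCK_DIAG channel; this is the interface ss(8) uses for its unix-socket listing. A minimal userspace dump sketch, assuming CONFIG_UNIX_DIAG is enabled (nested-attribute parsing of UNIX_DIAG_NAME and friends is elided):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_UNIX,
			.udiag_states = -1,	/* every socket state */
			.udiag_show = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER |
				      UDIAG_SHOW_RQLEN,
		},
	};
	char buf[8192];
	int n;

	send(fd, &msg, sizeof(msg), 0);

	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *h = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(h, n); h = NLMSG_NEXT(h, n)) {
			struct unix_diag_msg *m;

			if (h->nlmsg_type == NLMSG_DONE)
				goto out;
			if (h->nlmsg_type != SOCK_DIAG_BY_FAMILY)
				continue;	/* e.g. NLMSG_ERROR */
			m = NLMSG_DATA(h);
			printf("ino %u type %u state %u\n",
			       m->udiag_ino, m->udiag_type, m->udiag_state);
		}
	}
out:
	close(fd);
	return 0;
}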
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6ffc64e1712..9bc73f87f64 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -62,20 +62,24 @@
* AV 1 Mar 1999
* Damn. Added missing check for ->dead in listen queues scanning.
*
+ * Miklos Szeredi 25 Jun 2007
+ * Reimplement with a cycle collecting algorithm. This should
+ * solve several problems with the previous code, like being racy
+ * wrt receive and holding up unrelated socket operations.
*/
-
+
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
-#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
#include <net/sock.h>
#include <net/af_unix.h>
@@ -84,25 +88,25 @@
/* Internal data structures and random procedures: */
-#define GC_HEAD ((struct sock *)(-1))
-#define GC_ORPHAN ((struct sock *)(-3))
-
-static struct sock *gc_current = GC_HEAD; /* stack of objects to mark */
+static LIST_HEAD(gc_inflight_list);
+static LIST_HEAD(gc_candidates);
+static DEFINE_SPINLOCK(unix_gc_lock);
+static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
-atomic_t unix_tot_inflight = ATOMIC_INIT(0);
+unsigned int unix_tot_inflight;
-static struct sock *unix_get_socket(struct file *filp)
+struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
- struct inode *inode = filp->f_dentry->d_inode;
+ struct inode *inode = file_inode(filp);
/*
 * Socket?
*/
- if (S_ISSOCK(inode->i_mode)) {
- struct socket * sock = SOCKET_I(inode);
- struct sock * s = sock->sk;
+ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+ struct socket *sock = SOCKET_I(inode);
+ struct sock *s = sock->sk;
/*
 * PF_UNIX?
@@ -117,196 +121,266 @@ static struct sock *unix_get_socket(struct file *filp)
* Keep the number of times in flight count for the file
* descriptor if it is for an AF_UNIX socket.
*/
-
+
void unix_inflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
- if(s) {
- atomic_inc(&unix_sk(s)->inflight);
- atomic_inc(&unix_tot_inflight);
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+ spin_lock(&unix_gc_lock);
+ if (atomic_long_inc_return(&u->inflight) == 1) {
+ BUG_ON(!list_empty(&u->link));
+ list_add_tail(&u->link, &gc_inflight_list);
+ } else {
+ BUG_ON(list_empty(&u->link));
+ }
+ unix_tot_inflight++;
+ spin_unlock(&unix_gc_lock);
}
}
void unix_notinflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
- if(s) {
- atomic_dec(&unix_sk(s)->inflight);
- atomic_dec(&unix_tot_inflight);
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+ spin_lock(&unix_gc_lock);
+ BUG_ON(list_empty(&u->link));
+ if (atomic_long_dec_and_test(&u->inflight))
+ list_del_init(&u->link);
+ unix_tot_inflight--;
+ spin_unlock(&unix_gc_lock);
}
}
+static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
+ struct sk_buff_head *hitlist)
+{
+ struct sk_buff *skb;
+ struct sk_buff *next;
-/*
- * Garbage Collector Support Functions
- */
+ spin_lock(&x->sk_receive_queue.lock);
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
+ /*
+ * Do we have file descriptors?
+ */
+ if (UNIXCB(skb).fp) {
+ bool hit = false;
+ /*
+ * Process the descriptors of this socket
+ */
+ int nfd = UNIXCB(skb).fp->count;
+ struct file **fp = UNIXCB(skb).fp->fp;
+ while (nfd--) {
+ /*
+ * Get the socket this fd refers to,
+ * if it is a socket at all
+ */
+ struct sock *sk = unix_get_socket(*fp++);
+ if (sk) {
+ struct unix_sock *u = unix_sk(sk);
+
+ /*
+ * Ignore non-candidates; they could
+ * have been added to the queues after
+ * starting the garbage collection
+ */
+ if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
+ hit = true;
+ func(u);
+ }
+ }
+ }
+ if (hit && hitlist != NULL) {
+ __skb_unlink(skb, &x->sk_receive_queue);
+ __skb_queue_tail(hitlist, skb);
+ }
+ }
+ }
+ spin_unlock(&x->sk_receive_queue.lock);
+}
-static inline struct sock *pop_stack(void)
+static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
+ struct sk_buff_head *hitlist)
{
- struct sock *p = gc_current;
- gc_current = unix_sk(p)->gc_tree;
- return p;
+ if (x->sk_state != TCP_LISTEN)
+ scan_inflight(x, func, hitlist);
+ else {
+ struct sk_buff *skb;
+ struct sk_buff *next;
+ struct unix_sock *u;
+ LIST_HEAD(embryos);
+
+ /*
+ * For a listening socket collect the queued embryos
+ * and perform a scan on them as well.
+ */
+ spin_lock(&x->sk_receive_queue.lock);
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
+ u = unix_sk(skb->sk);
+
+ /*
+ * An embryo cannot be in-flight, so it's safe
+ * to use the list link.
+ */
+ BUG_ON(!list_empty(&u->link));
+ list_add_tail(&u->link, &embryos);
+ }
+ spin_unlock(&x->sk_receive_queue.lock);
+
+ while (!list_empty(&embryos)) {
+ u = list_entry(embryos.next, struct unix_sock, link);
+ scan_inflight(&u->sk, func, hitlist);
+ list_del_init(&u->link);
+ }
+ }
}
-static inline int empty_stack(void)
+static void dec_inflight(struct unix_sock *usk)
{
- return gc_current == GC_HEAD;
+ atomic_long_dec(&usk->inflight);
}
-static void maybe_unmark_and_push(struct sock *x)
+static void inc_inflight(struct unix_sock *usk)
{
- struct unix_sock *u = unix_sk(x);
+ atomic_long_inc(&usk->inflight);
+}
- if (u->gc_tree != GC_ORPHAN)
- return;
- sock_hold(x);
- u->gc_tree = gc_current;
- gc_current = x;
+static void inc_inflight_move_tail(struct unix_sock *u)
+{
+ atomic_long_inc(&u->inflight);
+ /*
+ * If this still might be part of a cycle, move it to the end
+ * of the list, so that it's checked even if it was already
+ * passed over
+ */
+ if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
+ list_move_tail(&u->link, &gc_candidates);
}
+static bool gc_in_progress = false;
+#define UNIX_INFLIGHT_TRIGGER_GC 16000
-/* The external entry point: unix_gc() */
+void wait_for_unix_gc(void)
+{
+ /*
+ * If the number of in-flight sockets is insane,
+ * force a garbage collection right now.
+ */
+ if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
+ unix_gc();
+ wait_event(unix_gc_wait, gc_in_progress == false);
+}
+/* The external entry point: unix_gc() */
void unix_gc(void)
{
- static DECLARE_MUTEX(unix_gc_sem);
- int i;
- struct sock *s;
+ struct unix_sock *u;
+ struct unix_sock *next;
struct sk_buff_head hitlist;
- struct sk_buff *skb;
+ struct list_head cursor;
+ LIST_HEAD(not_cycle_list);
- /*
- * Avoid a recursive GC.
- */
-
- if (down_trylock(&unix_gc_sem))
- return;
+ spin_lock(&unix_gc_lock);
- read_lock(&unix_table_lock);
+ /* Avoid a recursive GC. */
+ if (gc_in_progress)
+ goto out;
- forall_unix_sockets(i, s)
- {
- unix_sk(s)->gc_tree = GC_ORPHAN;
- }
+ gc_in_progress = true;
/*
- * Everything is now marked
+ * First, select candidates for garbage collection. Only
+ * in-flight sockets are considered, and from those only ones
+ * which don't have any external reference.
+ *
+ * Holding unix_gc_lock will protect these candidates from
+ * being detached, and hence from gaining an external
+ * reference. Since there are no possible receivers, all
+ * buffers currently on the candidates' queues stay there
+ * during the garbage collection.
+ *
+ * We also know that no new candidate can be added onto the
+ * receive queues. Other, non-candidate sockets _can_ be
+ * added to the queues, so we must make sure only to touch
+ * candidates.
*/
+ list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
+ long total_refs;
+ long inflight_refs;
+
+ total_refs = file_count(u->sk.sk_socket->file);
+ inflight_refs = atomic_long_read(&u->inflight);
+
+ BUG_ON(inflight_refs < 1);
+ BUG_ON(total_refs < inflight_refs);
+ if (total_refs == inflight_refs) {
+ list_move_tail(&u->link, &gc_candidates);
+ __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
+ __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+ }
+ }
- /* Invariant to be maintained:
- - everything unmarked is either:
- -- (a) on the stack, or
- -- (b) has all of its children unmarked
- - everything on the stack is always unmarked
- - nothing is ever pushed onto the stack twice, because:
- -- nothing previously unmarked is ever pushed on the stack
+ /*
+ * Now remove all internal in-flight references to children of
+ * the candidates.
*/
+ list_for_each_entry(u, &gc_candidates, link)
+ scan_children(&u->sk, dec_inflight, NULL);
/*
- * Push root set
+ * Restore the references for children of all candidates,
+ * which have remaining references. Do this recursively, so
+ * that only those forming cyclic references remain.
+ *
+ * Use a "cursor" link, to make the list traversal safe, even
+ * though elements might be moved about.
*/
+ list_add(&cursor, &gc_candidates);
+ while (cursor.next != &gc_candidates) {
+ u = list_entry(cursor.next, struct unix_sock, link);
- forall_unix_sockets(i, s)
- {
- int open_count = 0;
+ /* Move cursor to after the current position. */
+ list_move(&cursor, &u->link);
- /*
- * If all instances of the descriptor are not
- * in flight we are in use.
- *
- * Special case: when socket s is embrion, it may be
- * hashed but still not in queue of listening socket.
- * In this case (see unix_create1()) we set artificial
- * negative inflight counter to close race window.
- * It is trick of course and dirty one.
- */
- if (s->sk_socket && s->sk_socket->file)
- open_count = file_count(s->sk_socket->file);
- if (open_count > atomic_read(&unix_sk(s)->inflight))
- maybe_unmark_and_push(s);
+ if (atomic_long_read(&u->inflight) > 0) {
+ list_move_tail(&u->link, &not_cycle_list);
+ __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+ scan_children(&u->sk, inc_inflight_move_tail, NULL);
+ }
}
+ list_del(&cursor);
/*
- * Mark phase
+ * not_cycle_list contains those sockets which do not make up a
+ * cycle. Restore these to the inflight list.
*/
-
- while (!empty_stack())
- {
- struct sock *x = pop_stack();
- struct sock *sk;
-
- spin_lock(&x->sk_receive_queue.lock);
- skb = skb_peek(&x->sk_receive_queue);
-
- /*
- * Loop through all but first born
- */
-
- while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) {
- /*
- * Do we have file descriptors ?
- */
- if(UNIXCB(skb).fp)
- {
- /*
- * Process the descriptors of this socket
- */
- int nfd=UNIXCB(skb).fp->count;
- struct file **fp = UNIXCB(skb).fp->fp;
- while(nfd--)
- {
- /*
- * Get the socket the fd matches if
- * it indeed does so
- */
- if((sk=unix_get_socket(*fp++))!=NULL)
- {
- maybe_unmark_and_push(sk);
- }
- }
- }
- /* We have to scan not-yet-accepted ones too */
- if (x->sk_state == TCP_LISTEN)
- maybe_unmark_and_push(skb->sk);
- skb=skb->next;
- }
- spin_unlock(&x->sk_receive_queue.lock);
- sock_put(x);
+ while (!list_empty(&not_cycle_list)) {
+ u = list_entry(not_cycle_list.next, struct unix_sock, link);
+ __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
+ list_move_tail(&u->link, &gc_inflight_list);
}
+ /*
+ * Now gc_candidates contains only garbage. Restore original
+ * inflight counters for these as well, and remove the skbuffs
+ * which are creating the cycle(s).
+ */
skb_queue_head_init(&hitlist);
+ list_for_each_entry(u, &gc_candidates, link)
+ scan_children(&u->sk, inc_inflight, &hitlist);
- forall_unix_sockets(i, s)
- {
- struct unix_sock *u = unix_sk(s);
+ spin_unlock(&unix_gc_lock);
- if (u->gc_tree == GC_ORPHAN) {
- struct sk_buff *nextsk;
+ /* Here we are. Hitlist is filled. Die. */
+ __skb_queue_purge(&hitlist);
- spin_lock(&s->sk_receive_queue.lock);
- skb = skb_peek(&s->sk_receive_queue);
- while (skb &&
- skb != (struct sk_buff *)&s->sk_receive_queue) {
- nextsk = skb->next;
- /*
- * Do we have file descriptors ?
- */
- if (UNIXCB(skb).fp) {
- __skb_unlink(skb,
- &s->sk_receive_queue);
- __skb_queue_tail(&hitlist, skb);
- }
- skb = nextsk;
- }
- spin_unlock(&s->sk_receive_queue.lock);
- }
- u->gc_tree = GC_ORPHAN;
- }
- read_unlock(&unix_table_lock);
+ spin_lock(&unix_gc_lock);
- /*
- * Here we are. Hitlist is filled. Die.
- */
+ /* All candidates should have been detached by now. */
+ BUG_ON(!list_empty(&gc_candidates));
+ gc_in_progress = false;
+ wake_up(&unix_gc_wait);
- __skb_queue_purge(&hitlist);
- up(&unix_gc_sem);
+ out:
+ spin_unlock(&unix_gc_lock);
}
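
The garbage this collector targets is easy to manufacture from userspace: park a socket's own file descriptor, wrapped in SCM_RIGHTS, in its own receive queue, then drop every external reference. A minimal sketch (hypothetical demo, error handling omitted); after the two close() calls the only reference left to sv[0]'s file is the in-flight skb it itself holds, which is precisely the total_refs == inflight_refs candidate condition tested above:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Queue 'fd' as SCM_RIGHTS ancillary data on socket 'via'. */
static void send_fd(int via, int fd)
{
	char data = 'x', ctl[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	struct msghdr mh = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctl, .msg_controllen = sizeof(ctl),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_RIGHTS;
	cm->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cm), &fd, sizeof(int));
	sendmsg(via, &mh, 0);	/* unix_inflight() bumps fd's file */
}

int main(void)
{
	int sv[2];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);

	/* sendmsg() on sv[1] delivers to its peer, so sv[0]'s own fd
	 * now sits in sv[0]'s receive queue. */
	send_fd(sv[1], sv[0]);

	close(sv[0]);	/* file count == in-flight count == 1 */
	close(sv[1]);	/* cycle unreachable; unix_gc() reclaims it */
	return 0;
}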
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 690ffa5d5bf..b3d515021b7 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -10,51 +10,52 @@
*/
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/sysctl.h>
#include <net/af_unix.h>
-static ctl_table unix_table[] = {
+static struct ctl_table unix_table[] = {
{
- .ctl_name = NET_UNIX_MAX_DGRAM_QLEN,
.procname = "max_dgram_qlen",
- .data = &sysctl_unix_max_dgram_qlen,
+ .data = &init_net.unx.sysctl_max_dgram_qlen,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
- { .ctl_name = 0 }
+ { }
};
-static ctl_table unix_net_table[] = {
- {
- .ctl_name = NET_UNIX,
- .procname = "unix",
- .mode = 0555,
- .child = unix_table
- },
- { .ctl_name = 0 }
-};
+int __net_init unix_sysctl_register(struct net *net)
+{
+ struct ctl_table *table;
-static ctl_table unix_root_table[] = {
- {
- .ctl_name = CTL_NET,
- .procname = "net",
- .mode = 0555,
- .child = unix_net_table
- },
- { .ctl_name = 0 }
-};
+ table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
+ if (table == NULL)
+ goto err_alloc;
-static struct ctl_table_header * unix_sysctl_header;
+ /* Don't export sysctls to unprivileged users */
+ if (net->user_ns != &init_user_ns)
+ table[0].procname = NULL;
-void unix_sysctl_register(void)
-{
- unix_sysctl_header = register_sysctl_table(unix_root_table, 0);
+ table[0].data = &net->unx.sysctl_max_dgram_qlen;
+ net->unx.ctl = register_net_sysctl(net, "net/unix", table);
+ if (net->unx.ctl == NULL)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ kfree(table);
+err_alloc:
+ return -ENOMEM;
}
-void unix_sysctl_unregister(void)
+void unix_sysctl_unregister(struct net *net)
{
- unregister_sysctl_table(unix_sysctl_header);
-}
+ struct ctl_table *table;
+ table = net->unx.ctl->ctl_table_arg;
+ unregister_net_sysctl_table(net->unx.ctl);
+ kfree(table);
+}
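
With registration now done per network namespace, the knob appears as /proc/sys/net/unix/max_dgram_qlen in each namespace (and is hidden from namespaces with a non-init user namespace by the procname reset above). A quick sketch of reading the current value:

#include <stdio.h>

int main(void)
{
	int qlen = 0;
	FILE *f = fopen("/proc/sys/net/unix/max_dgram_qlen", "r");

	if (f && fscanf(f, "%d", &qlen) == 1)
		printf("max_dgram_qlen = %d\n", qlen);	/* default: 10 */
	if (f)
		fclose(f);
	return 0;
}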