author     Thomas Gleixner <tglx@linutronix.de>   2009-08-14 15:59:00 +0200
committer  Thomas Gleixner <tglx@linutronix.de>   2009-08-14 15:59:30 +0200
commit     4cd1993f0046fbc765dbf20af90966f5661e3789 (patch)
tree       8772c03b73159524183f08337b134503ddf8479e /net/core
parent     97fd9ed48ce2b807edc363bef3e817aeeb5cd5e6 (diff)
parent     64f1607ffbbc772685733ea63e6f7f4183df1b16 (diff)
Merge branch 'linus' into timers/core
Reason: Martin's timekeeping cleanup series depends on both
timers/core and mainline changes.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c      |  2
-rw-r--r--  net/core/dev.c           | 23
-rw-r--r--  net/core/net_namespace.c |  2
-rw-r--r--  net/core/netpoll.c       |  2
-rw-r--r--  net/core/sock.c          | 49
5 files changed, 57 insertions, 21 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 58abee1f1df..b0fe69211ee 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -712,7 +712,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	/* exceptional events? */
diff --git a/net/core/dev.c b/net/core/dev.c
index 70c27e0c7c3..6a94475aee8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3865,10 +3865,12 @@ int dev_unicast_delete(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
+	netif_addr_lock_bh(dev);
 	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
 			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
+	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -3889,10 +3891,12 @@ int dev_unicast_add(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
+	netif_addr_lock_bh(dev);
 	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
 			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
+	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
@@ -3949,7 +3953,8 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
  * @from: source device
  *
  * Add newly added addresses to the destination device and release
- * addresses that have no users left.
+ * addresses that have no users left. The source device must be
+ * locked by netif_tx_lock_bh.
  *
  * This function is intended to be called from the dev->set_rx_mode
  * function of layered software devices.
@@ -3958,14 +3963,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
 	int err = 0;
 
-	ASSERT_RTNL();
-
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;
 
+	netif_addr_lock_bh(to);
 	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
+	netif_addr_unlock_bh(to);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_sync);
@@ -3981,27 +3986,27 @@ EXPORT_SYMBOL(dev_unicast_sync);
  */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
-	ASSERT_RTNL();
-
 	if (to->addr_len != from->addr_len)
 		return;
 
+	netif_addr_lock_bh(from);
+	netif_addr_lock(to);
 	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
 	__dev_set_rx_mode(to);
+	netif_addr_unlock(to);
+	netif_addr_unlock_bh(from);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
 static void dev_unicast_flush(struct net_device *dev)
 {
-	/* rtnl_mutex must be held here */
-
+	netif_addr_lock_bh(dev);
 	__hw_addr_flush(&dev->uc);
+	netif_addr_unlock_bh(dev);
 }
 
 static void dev_unicast_init(struct net_device *dev)
 {
-	/* rtnl_mutex must be held here */
-
 	__hw_addr_init(&dev->uc);
 }
 
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b7292a2719d..197283072cc 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -488,7 +488,7 @@ int net_assign_generic(struct net *net, int id, void *data)
 	 */
 	ng->len = id;
-	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len);
+	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
 	rcu_assign_pointer(net->gen, ng);
 	call_rcu(&old_ng->rcu, net_generic_release);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9675f312830..df30feb2fc7 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -740,7 +740,7 @@ int netpoll_setup(struct netpoll *np)
 				       np->name);
 				break;
 			}
-			cond_resched();
+			msleep(1);
 		}
 
 		/* If carrier appears to come up instantly, we don't
diff --git a/net/core/sock.c b/net/core/sock.c
index b0ba569bc97..bbb25be7ddf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -631,7 +631,7 @@ set_rcvbuf:
 	case SO_TIMESTAMPING:
 		if (val & ~SOF_TIMESTAMPING_MASK) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			break;
 		}
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
@@ -919,13 +919,19 @@ static inline void sock_lock_init(struct sock *sk)
 		af_family_keys + sk->sk_family);
 }
 
+/*
+ * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
+ * even temporarly, because of RCU lookups. sk_node should also be left as is.
+ */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
 	void *sptr = nsk->sk_security;
 #endif
-
-	memcpy(nsk, osk, osk->sk_prot->obj_size);
+	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
+		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
+	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
 	nsk->sk_security = sptr;
 	security_sk_clone(osk, nsk);
@@ -939,8 +945,23 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 	struct kmem_cache *slab;
 
 	slab = prot->slab;
-	if (slab != NULL)
-		sk = kmem_cache_alloc(slab, priority);
+	if (slab != NULL) {
+		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
+		if (!sk)
+			return sk;
+		if (priority & __GFP_ZERO) {
+			/*
+			 * caches using SLAB_DESTROY_BY_RCU should let
+			 * sk_node.next un-modified. Special care is taken
+			 * when initializing object to zero.
+			 */
+			if (offsetof(struct sock, sk_node.next) != 0)
+				memset(sk, 0, offsetof(struct sock, sk_node.next));
+			memset(&sk->sk_node.pprev, 0,
+			       prot->obj_size - offsetof(struct sock,
+							 sk_node.pprev));
+		}
+	}
 	else
 		sk = kmalloc(prot->obj_size, priority);
 
@@ -1125,6 +1146,11 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 
 		newsk->sk_err	   = 0;
 		newsk->sk_priority = 0;
+		/*
+		 * Before updating sk_refcnt, we must commit prior changes to memory
+		 * (Documentation/RCU/rculist_nulls.txt for details)
+		 */
+		smp_wmb();
 		atomic_set(&newsk->sk_refcnt, 2);
 
 		/*
@@ -1715,7 +1741,7 @@ EXPORT_SYMBOL(sock_no_sendpage);
 
 static void sock_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_all(sk->sk_sleep);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -1723,7 +1749,7 @@ static void sock_def_wakeup(struct sock *sk)
 static void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	read_unlock(&sk->sk_callback_lock);
@@ -1732,7 +1758,7 @@ static void sock_def_error_report(struct sock *sk)
 static void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@@ -1747,7 +1773,7 @@ static void sock_def_write_space(struct sock *sk)
 	 * progress.  --DaveM
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 
@@ -1840,6 +1866,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 	sk->sk_stamp = ktime_set(-1L, 0);
 
+	/*
+	 * Before updating sk_refcnt, we must commit prior changes to memory
+	 * (Documentation/RCU/rculist_nulls.txt for details)
+	 */
+	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
 	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);