From fa438ccfdfd3f6db02c13b61b21454eb81cd6a13 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 4 Mar 2007 16:05:44 -0800 Subject: [NET]: Keep sk_backlog near sk_lock sk_backlog is a critical field of struct sock. (known famous words) It is (ab)used in hot paths, in particular in release_sock(), tcp_recvmsg(), tcp_v4_rcv(), sk_receive_skb(). It really makes sense to place it next to sk_lock, because sk_backlog is only used after sk_lock is locked (and thus its memory cache line is already in L1 cache). This should reduce cache misses and sk_lock acquisition time. (In theory, we could move only the head pointer near sk_lock and leave tail far away, because 'tail' is normally not so hot, but keep it simple :) ) Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core/sock.c') diff --git a/net/core/sock.c b/net/core/sock.c index 27c4f62382b..6d35d5775ba 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -904,6 +904,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) sk_node_init(&newsk->sk_node); sock_lock_init(newsk); bh_lock_sock(newsk); + newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; atomic_set(&newsk->sk_rmem_alloc, 0); atomic_set(&newsk->sk_wmem_alloc, 0); @@ -923,7 +924,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) newsk->sk_wmem_queued = 0; newsk->sk_forward_alloc = 0; newsk->sk_send_head = NULL; - newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; sock_reset_flag(newsk, SOCK_DONE); -- cgit v1.2.3-18-g5258
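For readers without the companion header change at hand: the hunk above only reorders an initialisation in sk_clone(), while the field move itself lives in include/net/sock.h and is therefore not visible in this sock.c-limited view. The fragment below is an illustrative sketch of the intended layout, not the real kernel definition; the struct name and comments are invented for illustration only.

struct sock_layout_sketch {
	socket_lock_t		sk_lock;	/* taken before the backlog is touched */
	struct {
		struct sk_buff	*head;		/* hot: walked by __release_sock() */
		struct sk_buff	*tail;		/* colder: only written on enqueue */
	} sk_backlog;
	/* ... remaining struct sock members ... */
};

With head and tail packed right after the lock, the cache line pulled in by taking sk_lock already holds the backlog pointers that release_sock() and sk_receive_skb() are about to use.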
From b7aa0bf70c4afb9e38be25f5c0922498d0f8684c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2007 16:16:32 -0700 Subject: [NET]: convert network timestamps to ktime_t We currently use a special structure (struct skb_timeval) and plain 'struct timeval' to store packet timestamps in sk_buffs and struct sock. This has some drawbacks: - Fixed resolution of one microsecond. - Wasted space on 64bit platforms, where sizeof(struct timeval)=16 I suggest using ktime_t, which is a nice abstraction of high resolution time services, currently capable of nanosecond resolution. As sizeof(ktime_t) is 8 bytes, using ktime_t in 'struct sock' permits an 8 byte shrink of this structure on 64bit architectures. Some other structures also benefit from this size reduction (struct ipq in ipv4/ip_fragment.c, struct frag_queue in ipv6/reassembly.c, ...) Once this ktime infrastructure is adopted, we can more easily provide nanosecond resolution on top of it. (ioctl SIOCGSTAMPNS and/or SO_TIMESTAMPNS/SCM_TIMESTAMPNS) Note: this patch includes a bug correction in compat_sock_get_timestamp() where an "err = 0;" was missing (so this syscall returned -ENOENT instead of 0) Signed-off-by: Eric Dumazet CC: Stephen Hemminger CC: John find Signed-off-by: David S. Miller --- net/core/sock.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'net/core/sock.c') diff --git a/net/core/sock.c b/net/core/sock.c index 6d35d5775ba..6ddb3664b99 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1512,8 +1512,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; - sk->sk_stamp.tv_sec = -1L; - sk->sk_stamp.tv_usec = -1L; + sk->sk_stamp = ktime_set(-1L, -1L); atomic_set(&sk->sk_refcnt, 1); } @@ -1554,14 +1553,17 @@ EXPORT_SYMBOL(release_sock); int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) { + struct timeval tv; if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_enable_timestamp(sk); - if (sk->sk_stamp.tv_sec == -1) + tv = ktime_to_timeval(sk->sk_stamp); + if (tv.tv_sec == -1) return -ENOENT; - if (sk->sk_stamp.tv_sec == 0) - do_gettimeofday(&sk->sk_stamp); - return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ? - -EFAULT : 0; + if (tv.tv_sec == 0) { + sk->sk_stamp = ktime_get_real(); + tv = ktime_to_timeval(sk->sk_stamp); + } + return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0; } EXPORT_SYMBOL(sock_get_timestamp); -- cgit v1.2.3-18-g5258
From ae40eb1ef30ab4120bd3c8b7e3da99ee53d27a23 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 18 Mar 2007 17:33:16 -0700 Subject: [NET]: Introduce SIOCGSTAMPNS ioctl to get timestamps with nanosec resolution Now that network timestamps use the ktime_t infrastructure, we can add a new ioctl() SIOCGSTAMPNS command to get timestamps in 'struct timespec'. User programs can thus access nanosecond resolution. Signed-off-by: Eric Dumazet CC: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/sock.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'net/core/sock.c') diff --git a/net/core/sock.c b/net/core/sock.c index 6ddb3664b99..cb48fa0e124 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1567,6 +1567,22 @@ int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) } EXPORT_SYMBOL(sock_get_timestamp); +int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) +{ + struct timespec ts; + if (!sock_flag(sk, SOCK_TIMESTAMP)) + sock_enable_timestamp(sk); + ts = ktime_to_timespec(sk->sk_stamp); + if (ts.tv_sec == -1) + return -ENOENT; + if (ts.tv_sec == 0) { + sk->sk_stamp = ktime_get_real(); + ts = ktime_to_timespec(sk->sk_stamp); + } + return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0; +} +EXPORT_SYMBOL(sock_get_timestampns); + void sock_enable_timestamp(struct sock *sk) { if (!sock_flag(sk, SOCK_TIMESTAMP)) { -- cgit v1.2.3-18-g5258
From e71a4783aae059931f63b2d4e7013e36529badef Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 10 Apr 2007 20:10:33 -0700 Subject: [NET] core: whitespace cleanup Fix whitespace around keywords. Fix indentation, especially of switch statements. Signed-off-by: Stephen Hemminger Signed-off-by: David S.
Miller --- net/core/sock.c | 712 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 354 insertions(+), 358 deletions(-) (limited to 'net/core/sock.c') diff --git a/net/core/sock.c b/net/core/sock.c index cb48fa0e124..792ae39804a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -361,8 +361,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname, } #endif - if(optlensk_reuse = valbool; - break; - case SO_TYPE: - case SO_ERROR: - ret = -ENOPROTOOPT; - break; - case SO_DONTROUTE: - if (valbool) - sock_set_flag(sk, SOCK_LOCALROUTE); - else - sock_reset_flag(sk, SOCK_LOCALROUTE); - break; - case SO_BROADCAST: - sock_valbool_flag(sk, SOCK_BROADCAST, valbool); - break; - case SO_SNDBUF: - /* Don't error on this BSD doesn't and if you think - about it this is right. Otherwise apps have to - play 'guess the biggest size' games. RCVBUF/SNDBUF - are treated in BSD as hints */ - - if (val > sysctl_wmem_max) - val = sysctl_wmem_max; + switch(optname) { + case SO_DEBUG: + if (val && !capable(CAP_NET_ADMIN)) { + ret = -EACCES; + } + else if (valbool) + sock_set_flag(sk, SOCK_DBG); + else + sock_reset_flag(sk, SOCK_DBG); + break; + case SO_REUSEADDR: + sk->sk_reuse = valbool; + break; + case SO_TYPE: + case SO_ERROR: + ret = -ENOPROTOOPT; + break; + case SO_DONTROUTE: + if (valbool) + sock_set_flag(sk, SOCK_LOCALROUTE); + else + sock_reset_flag(sk, SOCK_LOCALROUTE); + break; + case SO_BROADCAST: + sock_valbool_flag(sk, SOCK_BROADCAST, valbool); + break; + case SO_SNDBUF: + /* Don't error on this BSD doesn't and if you think + about it this is right. Otherwise apps have to + play 'guess the biggest size' games. RCVBUF/SNDBUF + are treated in BSD as hints */ + + if (val > sysctl_wmem_max) + val = sysctl_wmem_max; set_sndbuf: - sk->sk_userlocks |= SOCK_SNDBUF_LOCK; - if ((val * 2) < SOCK_MIN_SNDBUF) - sk->sk_sndbuf = SOCK_MIN_SNDBUF; - else - sk->sk_sndbuf = val * 2; + sk->sk_userlocks |= SOCK_SNDBUF_LOCK; + if ((val * 2) < SOCK_MIN_SNDBUF) + sk->sk_sndbuf = SOCK_MIN_SNDBUF; + else + sk->sk_sndbuf = val * 2; - /* - * Wake up sending tasks if we - * upped the value. - */ - sk->sk_write_space(sk); - break; + /* + * Wake up sending tasks if we + * upped the value. + */ + sk->sk_write_space(sk); + break; - case SO_SNDBUFFORCE: - if (!capable(CAP_NET_ADMIN)) { - ret = -EPERM; - break; - } - goto set_sndbuf; + case SO_SNDBUFFORCE: + if (!capable(CAP_NET_ADMIN)) { + ret = -EPERM; + break; + } + goto set_sndbuf; - case SO_RCVBUF: - /* Don't error on this BSD doesn't and if you think - about it this is right. Otherwise apps have to - play 'guess the biggest size' games. RCVBUF/SNDBUF - are treated in BSD as hints */ + case SO_RCVBUF: + /* Don't error on this BSD doesn't and if you think + about it this is right. Otherwise apps have to + play 'guess the biggest size' games. RCVBUF/SNDBUF + are treated in BSD as hints */ - if (val > sysctl_rmem_max) - val = sysctl_rmem_max; + if (val > sysctl_rmem_max) + val = sysctl_rmem_max; set_rcvbuf: - sk->sk_userlocks |= SOCK_RCVBUF_LOCK; - /* - * We double it on the way in to account for - * "struct sk_buff" etc. overhead. Applications - * assume that the SO_RCVBUF setting they make will - * allow that much actual data to be received on that - * socket. - * - * Applications are unaware that "struct sk_buff" and - * other overheads allocate from the receive buffer - * during socket buffer allocation. 
- * - * And after considering the possible alternatives, - * returning the value we actually used in getsockopt - * is the most desirable behavior. - */ - if ((val * 2) < SOCK_MIN_RCVBUF) - sk->sk_rcvbuf = SOCK_MIN_RCVBUF; - else - sk->sk_rcvbuf = val * 2; + sk->sk_userlocks |= SOCK_RCVBUF_LOCK; + /* + * We double it on the way in to account for + * "struct sk_buff" etc. overhead. Applications + * assume that the SO_RCVBUF setting they make will + * allow that much actual data to be received on that + * socket. + * + * Applications are unaware that "struct sk_buff" and + * other overheads allocate from the receive buffer + * during socket buffer allocation. + * + * And after considering the possible alternatives, + * returning the value we actually used in getsockopt + * is the most desirable behavior. + */ + if ((val * 2) < SOCK_MIN_RCVBUF) + sk->sk_rcvbuf = SOCK_MIN_RCVBUF; + else + sk->sk_rcvbuf = val * 2; + break; + + case SO_RCVBUFFORCE: + if (!capable(CAP_NET_ADMIN)) { + ret = -EPERM; break; + } + goto set_rcvbuf; - case SO_RCVBUFFORCE: - if (!capable(CAP_NET_ADMIN)) { - ret = -EPERM; - break; - } - goto set_rcvbuf; - - case SO_KEEPALIVE: + case SO_KEEPALIVE: #ifdef CONFIG_INET - if (sk->sk_protocol == IPPROTO_TCP) - tcp_set_keepalive(sk, valbool); + if (sk->sk_protocol == IPPROTO_TCP) + tcp_set_keepalive(sk, valbool); #endif - sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); - break; - - case SO_OOBINLINE: - sock_valbool_flag(sk, SOCK_URGINLINE, valbool); + sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); + break; + + case SO_OOBINLINE: + sock_valbool_flag(sk, SOCK_URGINLINE, valbool); + break; + + case SO_NO_CHECK: + sk->sk_no_check = valbool; + break; + + case SO_PRIORITY: + if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) + sk->sk_priority = val; + else + ret = -EPERM; + break; + + case SO_LINGER: + if (optlen < sizeof(ling)) { + ret = -EINVAL; /* 1003.1g */ break; - - case SO_NO_CHECK: - sk->sk_no_check = valbool; - break; - - case SO_PRIORITY: - if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) - sk->sk_priority = val; - else - ret = -EPERM; + } + if (copy_from_user(&ling,optval,sizeof(ling))) { + ret = -EFAULT; break; - - case SO_LINGER: - if(optlen= MAX_SCHEDULE_TIMEOUT/HZ) - sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; - else -#endif - sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; - sock_set_flag(sk, SOCK_LINGER); - } - break; - - case SO_BSDCOMPAT: - sock_warn_obsolete_bsdism("setsockopt"); - break; - - case SO_PASSCRED: - if (valbool) - set_bit(SOCK_PASSCRED, &sock->flags); + if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) + sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; else - clear_bit(SOCK_PASSCRED, &sock->flags); - break; - - case SO_TIMESTAMP: - if (valbool) { - sock_set_flag(sk, SOCK_RCVTSTAMP); - sock_enable_timestamp(sk); - } else - sock_reset_flag(sk, SOCK_RCVTSTAMP); - break; - - case SO_RCVLOWAT: - if (val < 0) - val = INT_MAX; - sk->sk_rcvlowat = val ? : 1; - break; +#endif + sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; + sock_set_flag(sk, SOCK_LINGER); + } + break; + + case SO_BSDCOMPAT: + sock_warn_obsolete_bsdism("setsockopt"); + break; + + case SO_PASSCRED: + if (valbool) + set_bit(SOCK_PASSCRED, &sock->flags); + else + clear_bit(SOCK_PASSCRED, &sock->flags); + break; + + case SO_TIMESTAMP: + if (valbool) { + sock_set_flag(sk, SOCK_RCVTSTAMP); + sock_enable_timestamp(sk); + } else + sock_reset_flag(sk, SOCK_RCVTSTAMP); + break; + + case SO_RCVLOWAT: + if (val < 0) + val = INT_MAX; + sk->sk_rcvlowat = val ? 
: 1; + break; + + case SO_RCVTIMEO: + ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); + break; + + case SO_SNDTIMEO: + ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); + break; - case SO_RCVTIMEO: - ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); - break; +#ifdef CONFIG_NETDEVICES + case SO_BINDTODEVICE: + { + char devname[IFNAMSIZ]; - case SO_SNDTIMEO: - ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); + /* Sorry... */ + if (!capable(CAP_NET_RAW)) { + ret = -EPERM; break; + } -#ifdef CONFIG_NETDEVICES - case SO_BINDTODEVICE: - { - char devname[IFNAMSIZ]; + /* Bind this socket to a particular device like "eth0", + * as specified in the passed interface name. If the + * name is "" or the option length is zero the socket + * is not bound. + */ - /* Sorry... */ - if (!capable(CAP_NET_RAW)) { - ret = -EPERM; + if (!valbool) { + sk->sk_bound_dev_if = 0; + } else { + if (optlen > IFNAMSIZ - 1) + optlen = IFNAMSIZ - 1; + memset(devname, 0, sizeof(devname)); + if (copy_from_user(devname, optval, optlen)) { + ret = -EFAULT; break; } - /* Bind this socket to a particular device like "eth0", - * as specified in the passed interface name. If the - * name is "" or the option length is zero the socket - * is not bound. - */ + /* Remove any cached route for this socket. */ + sk_dst_reset(sk); - if (!valbool) { + if (devname[0] == '\0') { sk->sk_bound_dev_if = 0; } else { - if (optlen > IFNAMSIZ - 1) - optlen = IFNAMSIZ - 1; - memset(devname, 0, sizeof(devname)); - if (copy_from_user(devname, optval, optlen)) { - ret = -EFAULT; + struct net_device *dev = dev_get_by_name(devname); + if (!dev) { + ret = -ENODEV; break; } - - /* Remove any cached route for this socket. */ - sk_dst_reset(sk); - - if (devname[0] == '\0') { - sk->sk_bound_dev_if = 0; - } else { - struct net_device *dev = dev_get_by_name(devname); - if (!dev) { - ret = -ENODEV; - break; - } - sk->sk_bound_dev_if = dev->ifindex; - dev_put(dev); - } + sk->sk_bound_dev_if = dev->ifindex; + dev_put(dev); } - break; } + break; + } #endif - case SO_ATTACH_FILTER: - ret = -EINVAL; - if (optlen == sizeof(struct sock_fprog)) { - struct sock_fprog fprog; - - ret = -EFAULT; - if (copy_from_user(&fprog, optval, sizeof(fprog))) - break; - - ret = sk_attach_filter(&fprog, sk); - } - break; + case SO_ATTACH_FILTER: + ret = -EINVAL; + if (optlen == sizeof(struct sock_fprog)) { + struct sock_fprog fprog; - case SO_DETACH_FILTER: - rcu_read_lock_bh(); - filter = rcu_dereference(sk->sk_filter); - if (filter) { - rcu_assign_pointer(sk->sk_filter, NULL); - sk_filter_release(sk, filter); - rcu_read_unlock_bh(); + ret = -EFAULT; + if (copy_from_user(&fprog, optval, sizeof(fprog))) break; - } + + ret = sk_attach_filter(&fprog, sk); + } + break; + + case SO_DETACH_FILTER: + rcu_read_lock_bh(); + filter = rcu_dereference(sk->sk_filter); + if (filter) { + rcu_assign_pointer(sk->sk_filter, NULL); + sk_filter_release(sk, filter); rcu_read_unlock_bh(); - ret = -ENONET; break; + } + rcu_read_unlock_bh(); + ret = -ENONET; + break; - case SO_PASSSEC: - if (valbool) - set_bit(SOCK_PASSSEC, &sock->flags); - else - clear_bit(SOCK_PASSSEC, &sock->flags); - break; + case SO_PASSSEC: + if (valbool) + set_bit(SOCK_PASSSEC, &sock->flags); + else + clear_bit(SOCK_PASSSEC, &sock->flags); + break; /* We implement the SO_SNDLOWAT etc to not be settable (1003.1g 5.3) */ - default: - ret = -ENOPROTOOPT; - break; + default: + ret = -ENOPROTOOPT; + break; } release_sock(sk); return ret; @@ -641,8 +639,7 @@ int sock_getsockopt(struct socket *sock, int 
level, int optname, { struct sock *sk = sock->sk; - union - { + union { int val; struct linger ling; struct timeval tm; @@ -651,148 +648,148 @@ int sock_getsockopt(struct socket *sock, int level, int optname, unsigned int lv = sizeof(int); int len; - if(get_user(len,optlen)) + if (get_user(len, optlen)) return -EFAULT; - if(len < 0) + if (len < 0) return -EINVAL; - switch(optname) - { - case SO_DEBUG: - v.val = sock_flag(sk, SOCK_DBG); - break; - - case SO_DONTROUTE: - v.val = sock_flag(sk, SOCK_LOCALROUTE); - break; - - case SO_BROADCAST: - v.val = !!sock_flag(sk, SOCK_BROADCAST); - break; - - case SO_SNDBUF: - v.val = sk->sk_sndbuf; - break; - - case SO_RCVBUF: - v.val = sk->sk_rcvbuf; - break; - - case SO_REUSEADDR: - v.val = sk->sk_reuse; - break; - - case SO_KEEPALIVE: - v.val = !!sock_flag(sk, SOCK_KEEPOPEN); - break; - - case SO_TYPE: - v.val = sk->sk_type; - break; - - case SO_ERROR: - v.val = -sock_error(sk); - if(v.val==0) - v.val = xchg(&sk->sk_err_soft, 0); - break; - - case SO_OOBINLINE: - v.val = !!sock_flag(sk, SOCK_URGINLINE); - break; - - case SO_NO_CHECK: - v.val = sk->sk_no_check; - break; - - case SO_PRIORITY: - v.val = sk->sk_priority; - break; - - case SO_LINGER: - lv = sizeof(v.ling); - v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER); - v.ling.l_linger = sk->sk_lingertime / HZ; - break; - - case SO_BSDCOMPAT: - sock_warn_obsolete_bsdism("getsockopt"); - break; - - case SO_TIMESTAMP: - v.val = sock_flag(sk, SOCK_RCVTSTAMP); - break; - - case SO_RCVTIMEO: - lv=sizeof(struct timeval); - if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { - v.tm.tv_sec = 0; - v.tm.tv_usec = 0; - } else { - v.tm.tv_sec = sk->sk_rcvtimeo / HZ; - v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ; - } - break; + switch(optname) { + case SO_DEBUG: + v.val = sock_flag(sk, SOCK_DBG); + break; + + case SO_DONTROUTE: + v.val = sock_flag(sk, SOCK_LOCALROUTE); + break; + + case SO_BROADCAST: + v.val = !!sock_flag(sk, SOCK_BROADCAST); + break; + + case SO_SNDBUF: + v.val = sk->sk_sndbuf; + break; + + case SO_RCVBUF: + v.val = sk->sk_rcvbuf; + break; + + case SO_REUSEADDR: + v.val = sk->sk_reuse; + break; + + case SO_KEEPALIVE: + v.val = !!sock_flag(sk, SOCK_KEEPOPEN); + break; + + case SO_TYPE: + v.val = sk->sk_type; + break; + + case SO_ERROR: + v.val = -sock_error(sk); + if (v.val==0) + v.val = xchg(&sk->sk_err_soft, 0); + break; + + case SO_OOBINLINE: + v.val = !!sock_flag(sk, SOCK_URGINLINE); + break; + + case SO_NO_CHECK: + v.val = sk->sk_no_check; + break; + + case SO_PRIORITY: + v.val = sk->sk_priority; + break; + + case SO_LINGER: + lv = sizeof(v.ling); + v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER); + v.ling.l_linger = sk->sk_lingertime / HZ; + break; + + case SO_BSDCOMPAT: + sock_warn_obsolete_bsdism("getsockopt"); + break; + + case SO_TIMESTAMP: + v.val = sock_flag(sk, SOCK_RCVTSTAMP); + break; + + case SO_RCVTIMEO: + lv=sizeof(struct timeval); + if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { + v.tm.tv_sec = 0; + v.tm.tv_usec = 0; + } else { + v.tm.tv_sec = sk->sk_rcvtimeo / HZ; + v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ; + } + break; + + case SO_SNDTIMEO: + lv=sizeof(struct timeval); + if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { + v.tm.tv_sec = 0; + v.tm.tv_usec = 0; + } else { + v.tm.tv_sec = sk->sk_sndtimeo / HZ; + v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ; + } + break; - case SO_SNDTIMEO: - lv=sizeof(struct timeval); - if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { - v.tm.tv_sec = 0; - v.tm.tv_usec = 0; - } else { - v.tm.tv_sec = sk->sk_sndtimeo / HZ; - 
v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ; - } - break; + case SO_RCVLOWAT: + v.val = sk->sk_rcvlowat; + break; - case SO_RCVLOWAT: - v.val = sk->sk_rcvlowat; - break; + case SO_SNDLOWAT: + v.val=1; + break; - case SO_SNDLOWAT: - v.val=1; - break; + case SO_PASSCRED: + v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0; + break; - case SO_PASSCRED: - v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0; - break; + case SO_PEERCRED: + if (len > sizeof(sk->sk_peercred)) + len = sizeof(sk->sk_peercred); + if (copy_to_user(optval, &sk->sk_peercred, len)) + return -EFAULT; + goto lenout; - case SO_PEERCRED: - if (len > sizeof(sk->sk_peercred)) - len = sizeof(sk->sk_peercred); - if (copy_to_user(optval, &sk->sk_peercred, len)) - return -EFAULT; - goto lenout; - - case SO_PEERNAME: - { - char address[128]; - - if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2)) - return -ENOTCONN; - if (lv < len) - return -EINVAL; - if (copy_to_user(optval, address, len)) - return -EFAULT; - goto lenout; - } + case SO_PEERNAME: + { + char address[128]; + + if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2)) + return -ENOTCONN; + if (lv < len) + return -EINVAL; + if (copy_to_user(optval, address, len)) + return -EFAULT; + goto lenout; + } - /* Dubious BSD thing... Probably nobody even uses it, but - * the UNIX standard wants it for whatever reason... -DaveM - */ - case SO_ACCEPTCONN: - v.val = sk->sk_state == TCP_LISTEN; - break; + /* Dubious BSD thing... Probably nobody even uses it, but + * the UNIX standard wants it for whatever reason... -DaveM + */ + case SO_ACCEPTCONN: + v.val = sk->sk_state == TCP_LISTEN; + break; - case SO_PASSSEC: - v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0; - break; + case SO_PASSSEC: + v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0; + break; - case SO_PEERSEC: - return security_socket_getpeersec_stream(sock, optval, optlen, len); + case SO_PEERSEC: + return security_socket_getpeersec_stream(sock, optval, optlen, len); - default: - return(-ENOPROTOOPT); + default: + return -ENOPROTOOPT; } + if (len > lv) len = lv; if (copy_to_user(optval, &v, len)) @@ -1220,13 +1217,13 @@ static void __lock_sock(struct sock *sk) { DEFINE_WAIT(wait); - for(;;) { + for (;;) { prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_bh(&sk->sk_lock.slock); schedule(); spin_lock_bh(&sk->sk_lock.slock); - if(!sock_owned_by_user(sk)) + if (!sock_owned_by_user(sk)) break; } finish_wait(&sk->sk_lock.wq, &wait); @@ -1258,7 +1255,7 @@ static void __release_sock(struct sock *sk) } while (skb != NULL); bh_lock_sock(sk); - } while((skb = sk->sk_backlog.head) != NULL); + } while ((skb = sk->sk_backlog.head) != NULL); } /** @@ -1420,7 +1417,7 @@ static void sock_def_write_space(struct sock *sk) /* Do not wake up a writer until he can make "significant" * progress. 
--DaveM */ - if((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { + if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) wake_up_interruptible(sk->sk_sleep); @@ -1482,8 +1479,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sock_set_flag(sk, SOCK_ZAPPED); - if(sock) - { + if (sock) { sk->sk_type = sock->type; sk->sk_sleep = &sock->wait; sock->sk = sk; -- cgit v1.2.3-18-g5258
From 92f37fd2ee805aa77925c1e64fd56088b46094fc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 25 Mar 2007 22:14:49 -0700 Subject: [NET]: Adding SO_TIMESTAMPNS / SCM_TIMESTAMPNS support Now that network timestamps use the ktime_t infrastructure, we can add a new SOL_SOCKET sockopt SO_TIMESTAMPNS. This command is similar to SO_TIMESTAMP, but transmits a 'struct timespec' instead of a 'struct timeval' in the control message (nanosecond resolution instead of microsecond). The control message is labelled SCM_TIMESTAMPNS instead of SCM_TIMESTAMP. A socket cannot mix SO_TIMESTAMP and SO_TIMESTAMPNS: the two modes are mutually exclusive. sock_recv_timestamp() became too big to be fully inlined, so I added a __sock_recv_timestamp() helper function. Signed-off-by: Eric Dumazet CC: linux-arch@vger.kernel.org Signed-off-by: David S. Miller --- net/core/sock.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'net/core/sock.c') diff --git a/net/core/sock.c b/net/core/sock.c index 792ae39804a..f9e6991d372 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -521,11 +521,18 @@ set_rcvbuf: break; case SO_TIMESTAMP: + case SO_TIMESTAMPNS: if (valbool) { + if (optname == SO_TIMESTAMP) + sock_reset_flag(sk, SOCK_RCVTSTAMPNS); + else + sock_set_flag(sk, SOCK_RCVTSTAMPNS); sock_set_flag(sk, SOCK_RCVTSTAMP); sock_enable_timestamp(sk); - } else + } else { sock_reset_flag(sk, SOCK_RCVTSTAMP); + sock_reset_flag(sk, SOCK_RCVTSTAMPNS); + } break; case SO_RCVLOWAT: @@ -715,7 +722,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_TIMESTAMP: - v.val = sock_flag(sk, SOCK_RCVTSTAMP); + v.val = sock_flag(sk, SOCK_RCVTSTAMP) && + !sock_flag(sk, SOCK_RCVTSTAMPNS); + break; + + case SO_TIMESTAMPNS: + v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); + break; case SO_RCVTIMEO: -- cgit v1.2.3-18-g5258
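As a usage illustration (not part of the patch): a minimal userspace sketch of a datagram receiver that requests the new nanosecond timestamps and reads them back from the SCM_TIMESTAMPNS control message, which carries a struct timespec. It assumes the SO_TIMESTAMPNS/SCM_TIMESTAMPNS definitions introduced by this series are visible through the installed socket headers; bind() and error handling are omitted.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <time.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	char data[2048];
	char cbuf[CMSG_SPACE(sizeof(struct timespec))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	/* ask for nanosecond receive timestamps (instead of SO_TIMESTAMP) */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on));

	/* ... bind() to a local port, then receive one datagram ... */
	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPNS) {
			struct timespec ts;

			memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
			printf("packet received at %ld.%09ld\n",
			       (long)ts.tv_sec, ts.tv_nsec);
		}
	}
	return 0;
}

A socket that prefers microsecond timeval timestamps simply keeps using SO_TIMESTAMP/SCM_TIMESTAMP; as the commit message notes, the two modes are mutually exclusive on a given socket.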
From f690808e17925fc45217eb22e8670902ecee5c1b Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Mon, 12 Mar 2007 14:34:29 -0700 Subject: [NET]: make seq_operations const The seq_file operations structures can be marked const to get them out of dirty cache. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core/sock.c') diff --git a/net/core/sock.c b/net/core/sock.c index f9e6991d372..73a8018029a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1925,7 +1925,7 @@ static int proto_seq_show(struct seq_file *seq, void *v) return 0; } -static struct seq_operations proto_seq_ops = { +static const struct seq_operations proto_seq_ops = { .start = proto_seq_start, .next = proto_seq_next, .stop = proto_seq_stop, -- cgit v1.2.3-18-g5258
From 9958089a43ae8a9af07402461c0b2b7548c7341e Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 20 Apr 2007 17:12:43 -0700 Subject: [NET]: Move sk_setup_caps() out of line. It is far too large to be inlined and is not in any hot paths. Signed-off-by: Andi Kleen Signed-off-by: David S. Miller --- net/core/sock.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'net/core/sock.c') diff --git a/net/core/sock.c b/net/core/sock.c index 73a8018029a..043bdc05d21 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -979,6 +979,21 @@ out: EXPORT_SYMBOL_GPL(sk_clone); +void sk_setup_caps(struct sock *sk, struct dst_entry *dst) +{ + __sk_dst_set(sk, dst); + sk->sk_route_caps = dst->dev->features; + if (sk->sk_route_caps & NETIF_F_GSO) + sk->sk_route_caps |= NETIF_F_GSO_MASK; + if (sk_can_gso(sk)) { + if (dst->header_len) + sk->sk_route_caps &= ~NETIF_F_GSO_MASK; + else + sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; + } +} +EXPORT_SYMBOL_GPL(sk_setup_caps); + void __init sk_init(void) { if (num_physpages <= 4096) { -- cgit v1.2.3-18-g5258
From 17926a79320afa9b95df6b977b40cca6d8713cea Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 26 Apr 2007 15:48:28 -0700 Subject: [AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both Provide AF_RXRPC sockets that can be used to talk to AFS servers, or serve answers to AFS clients. KerberosIV security is fully supported. The patches and some example test programs can be found in: http://people.redhat.com/~dhowells/rxrpc/ This will eventually replace the old implementation of kernel-only RxRPC currently resident in net/rxrpc/. Signed-off-by: David Howells Signed-off-by: David S. Miller --- net/core/sock.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'net/core/sock.c') diff --git a/net/core/sock.c b/net/core/sock.c index 043bdc05d21..22183c2ef28 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -154,7 +154,8 @@ static const char *af_family_key_strings[AF_MAX+1] = { "sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" , "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , "sk_lock-27" , "sk_lock-28" , "sk_lock-29" , - "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX" + "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , + "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX" }; static const char *af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , @@ -167,7 +168,8 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = { "slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" , "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , "slock-27" , "slock-28" , "slock-29" , - "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_MAX" + "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , + "slock-AF_RXRPC" , "slock-AF_MAX" }; #endif -- cgit v1.2.3-18-g5258
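To make the new address family a little more concrete, here is a hypothetical sketch of opening a client AF_RXRPC socket over IPv4 UDP. It is modelled on the AF_RXRPC documentation and the example programs referenced in the commit message rather than on the hunk above (which only extends the lock-class name tables in sock.c); the header name, field names and constants should all be treated as assumptions, and rxrpc key setup plus error handling are omitted.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/rxrpc.h>	/* struct sockaddr_rxrpc; assumed to be installed */

int open_rxrpc_client(void)
{
	struct sockaddr_rxrpc srx;
	/* the third argument selects the transport family that carries RxRPC */
	int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);

	if (fd < 0)
		return -1;

	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.srx_service = 0;			/* 0: plain client, no service offered */
	srx.transport_type = SOCK_DGRAM;	/* RxRPC frames carried over UDP */
	srx.transport_len = sizeof(srx.transport.sin);
	srx.transport.sin.sin_family = AF_INET;
	srx.transport.sin.sin_port = 0;		/* any local UDP port */

	if (bind(fd, (struct sockaddr *)&srx, sizeof(srx)) < 0)
		return -1;

	/* individual RxRPC calls are then made with sendmsg()/recvmsg(),
	 * using control messages to tag and track each call */
	return fd;
}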