Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_fd.c | 112
-rw-r--r--  net/atm/br2684.c | 11
-rw-r--r--  net/atm/lec.c | 10
-rw-r--r--  net/compat.c | 11
-rw-r--r--  net/core/dev.c | 21
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/dccp/ipv4.c | 2
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/ipv4/Kconfig | 6
-rw-r--r--  net/ipv4/inet_hashtables.c | 24
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 61
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 21
-rw-r--r--  net/ipv4/syncookies.c | 27
-rw-r--r--  net/ipv4/tcp.c | 5
-rw-r--r--  net/ipv4/tcp_input.c | 59
-rw-r--r--  net/ipv4/tcp_ipv4.c | 23
-rw-r--r--  net/ipv4/tcp_minisocks.c | 10
-rw-r--r--  net/ipv4/tcp_output.c | 18
-rw-r--r--  net/ipv4/tcp_timer.c | 29
-rw-r--r--  net/ipv4/udp.c | 7
-rw-r--r--  net/ipv6/inet6_hashtables.c | 8
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 19
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 7
-rw-r--r--  net/ipv6/reassembly.c | 5
-rw-r--r--  net/ipv6/syncookies.c | 28
-rw-r--r--  net/ipv6/tcp_ipv6.c | 7
-rw-r--r--  net/irda/irnet/irnet.h | 1
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 8
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/iucv/iucv.c | 2
-rw-r--r--  net/key/af_key.c | 1
-rw-r--r--  net/mac80211/cfg.c | 3
-rw-r--r--  net/mac80211/ieee80211_i.h | 1
-rw-r--r--  net/mac80211/mesh.c | 2
-rw-r--r--  net/mac80211/mesh.h | 5
-rw-r--r--  net/mac80211/mesh_hwmp.c | 2
-rw-r--r--  net/mac80211/mlme.c | 10
-rw-r--r--  net/mac80211/rx.c | 1
-rw-r--r--  net/mac80211/scan.c | 20
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 4
-rw-r--r--  net/netfilter/xt_recent.c | 3
-rw-r--r--  net/packet/af_packet.c | 71
-rw-r--r--  net/rds/ib.c | 4
-rw-r--r--  net/rds/iw.c | 4
-rw-r--r--  net/rfkill/core.c | 4
-rw-r--r--  net/socket.c | 125
-rw-r--r--  net/sunrpc/addr.c | 10
-rw-r--r--  net/sunrpc/auth.c | 39
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 6
-rw-r--r--  net/sunrpc/clnt.c | 54
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 104
-rw-r--r--  net/sunrpc/sched.c | 15
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 3
-rw-r--r--  net/sunrpc/svc_xprt.c | 31
-rw-r--r--  net/sunrpc/svcauth_unix.c | 53
-rw-r--r--  net/sunrpc/xprt.c | 4
-rw-r--r--  net/sunrpc/xprtsock.c | 2
-rw-r--r--  net/wireless/reg.c | 75
-rw-r--r--  net/wireless/wext-compat.c | 1
63 files changed, 676 insertions, 542 deletions
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4dd873e3a1b..be1cb909d8c 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -42,6 +42,8 @@
#include <net/9p/client.h>
#include <net/9p/transport.h>
+#include <linux/syscalls.h> /* killme */
+
#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define MAXPOLLWADDR 2
@@ -788,24 +790,41 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
- int fd, ret;
+ struct p9_trans_fd *p;
+ int ret, fd;
+
+ p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
csocket->sk->sk_allocation = GFP_NOIO;
fd = sock_map_fd(csocket, 0);
if (fd < 0) {
P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
+ sock_release(csocket);
+ kfree(p);
return fd;
}
- ret = p9_fd_open(client, fd, fd);
- if (ret < 0) {
- P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
+ get_file(csocket->file);
+ get_file(csocket->file);
+ p->wr = p->rd = csocket->file;
+ client->trans = p;
+ client->status = Connected;
+
+ sys_close(fd); /* still racy */
+
+ p->rd->f_flags |= O_NONBLOCK;
+
+ p->conn = p9_conn_create(client);
+ if (IS_ERR(p->conn)) {
+ ret = PTR_ERR(p->conn);
+ p->conn = NULL;
+ kfree(p);
+ sockfd_put(csocket);
sockfd_put(csocket);
return ret;
}
-
- ((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;
-
return 0;
}
@@ -883,7 +902,6 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
struct socket *csocket;
struct sockaddr_in sin_server;
struct p9_fd_opts opts;
- struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
err = parse_opts(args, &opts);
if (err < 0)
@@ -897,12 +915,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
sin_server.sin_family = AF_INET;
sin_server.sin_addr.s_addr = in_aton(addr);
sin_server.sin_port = htons(opts.port);
- sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
+ err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
- if (!csocket) {
+ if (err) {
P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
- err = -EIO;
- goto error;
+ return err;
}
err = csocket->ops->connect(csocket,
@@ -912,30 +929,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
P9_EPRINTK(KERN_ERR,
"p9_trans_tcp: problem connecting socket to %s\n",
addr);
- goto error;
- }
-
- err = p9_socket_open(client, csocket);
- if (err < 0)
- goto error;
-
- p = (struct p9_trans_fd *) client->trans;
- p->conn = p9_conn_create(client);
- if (IS_ERR(p->conn)) {
- err = PTR_ERR(p->conn);
- p->conn = NULL;
- goto error;
- }
-
- return 0;
-
-error:
- if (csocket)
sock_release(csocket);
+ return err;
+ }
- kfree(p);
-
- return err;
+ return p9_socket_open(client, csocket);
}
static int
@@ -944,49 +942,33 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
int err;
struct socket *csocket;
struct sockaddr_un sun_server;
- struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
csocket = NULL;
if (strlen(addr) > UNIX_PATH_MAX) {
P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
addr);
- err = -ENAMETOOLONG;
- goto error;
+ return -ENAMETOOLONG;
}
sun_server.sun_family = PF_UNIX;
strcpy(sun_server.sun_path, addr);
- sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+ err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+ if (err < 0) {
+ P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
+ return err;
+ }
err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
sizeof(struct sockaddr_un) - 1, 0);
if (err < 0) {
P9_EPRINTK(KERN_ERR,
"p9_trans_unix: problem connecting socket: %s: %d\n",
addr, err);
- goto error;
- }
-
- err = p9_socket_open(client, csocket);
- if (err < 0)
- goto error;
-
- p = (struct p9_trans_fd *) client->trans;
- p->conn = p9_conn_create(client);
- if (IS_ERR(p->conn)) {
- err = PTR_ERR(p->conn);
- p->conn = NULL;
- goto error;
- }
-
- return 0;
-
-error:
- if (csocket)
sock_release(csocket);
+ return err;
+ }
- kfree(p);
- return err;
+ return p9_socket_open(client, csocket);
}
static int
@@ -994,7 +976,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
int err;
struct p9_fd_opts opts;
- struct p9_trans_fd *p = NULL; /* this get allocated in p9_fd_open */
+ struct p9_trans_fd *p;
parse_opts(args, &opts);
@@ -1005,21 +987,19 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
err = p9_fd_open(client, opts.rfd, opts.wfd);
if (err < 0)
- goto error;
+ return err;
p = (struct p9_trans_fd *) client->trans;
p->conn = p9_conn_create(client);
if (IS_ERR(p->conn)) {
err = PTR_ERR(p->conn);
p->conn = NULL;
- goto error;
+ fput(p->rd);
+ fput(p->wr);
+ return err;
}
return 0;
-
-error:
- kfree(p);
- return err;
}
static struct p9_trans_module p9_tcp_trans = {
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 26a646d4eb3..c9230c39869 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -554,6 +554,12 @@ static const struct net_device_ops br2684_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static const struct net_device_ops br2684_netdev_ops_routed = {
+ .ndo_start_xmit = br2684_start_xmit,
+ .ndo_set_mac_address = br2684_mac_addr,
+ .ndo_change_mtu = eth_change_mtu
+};
+
static void br2684_setup(struct net_device *netdev)
{
struct br2684_dev *brdev = BRPRIV(netdev);
@@ -569,11 +575,10 @@ static void br2684_setup(struct net_device *netdev)
static void br2684_setup_routed(struct net_device *netdev)
{
struct br2684_dev *brdev = BRPRIV(netdev);
- brdev->net_dev = netdev;
+ brdev->net_dev = netdev;
netdev->hard_header_len = 0;
-
- netdev->netdev_ops = &br2684_netdev_ops;
+ netdev->netdev_ops = &br2684_netdev_ops_routed;
netdev->addr_len = 0;
netdev->mtu = 1500;
netdev->type = ARPHRD_PPP;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index b2d64456032..42749b7b917 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -62,7 +62,6 @@ static int lec_open(struct net_device *dev);
static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int lec_close(struct net_device *dev);
-static void lec_init(struct net_device *dev);
static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
const unsigned char *mac_addr);
static int lec_arp_remove(struct lec_priv *priv,
@@ -670,13 +669,6 @@ static const struct net_device_ops lec_netdev_ops = {
.ndo_set_multicast_list = lec_set_multicast_list,
};
-
-static void lec_init(struct net_device *dev)
-{
- dev->netdev_ops = &lec_netdev_ops;
- printk("%s: Initialized!\n", dev->name);
-}
-
static const unsigned char lec_ctrl_magic[] = {
0xff,
0x00,
@@ -893,6 +885,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
dev_lec[i] = alloc_etherdev(size);
if (!dev_lec[i])
return -ENOMEM;
+ dev_lec[i]->netdev_ops = &lec_netdev_ops;
snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
if (register_netdev(dev_lec[i])) {
free_netdev(dev_lec[i]);
@@ -901,7 +894,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
priv = netdev_priv(dev_lec[i]);
priv->is_trdev = is_trdev;
- lec_init(dev_lec[i]);
} else {
priv = netdev_priv(dev_lec[i]);
if (priv->lecd)
diff --git a/net/compat.c b/net/compat.c
index e1a56ade803..a1fb1b079a8 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -754,26 +754,21 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
- struct timespec __user *timeout)
+ struct compat_timespec __user *timeout)
{
int datagrams;
struct timespec ktspec;
- struct compat_timespec __user *utspec;
if (timeout == NULL)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, NULL);
- utspec = (struct compat_timespec __user *)timeout;
- if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
- get_user(ktspec.tv_nsec, &utspec->tv_nsec))
+ if (get_compat_timespec(&ktspec, timeout))
return -EFAULT;
datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, &ktspec);
- if (datagrams > 0 &&
- (put_user(ktspec.tv_sec, &utspec->tv_sec) ||
- put_user(ktspec.tv_nsec, &utspec->tv_nsec)))
+ if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
datagrams = -EFAULT;
return datagrams;
diff --git a/net/core/dev.c b/net/core/dev.c
index c36a17aafcf..be9924f60ec 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4771,21 +4771,23 @@ static void net_set_todo(struct net_device *dev)
static void rollback_registered_many(struct list_head *head)
{
- struct net_device *dev;
+ struct net_device *dev, *tmp;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
- list_for_each_entry(dev, head, unreg_list) {
+ list_for_each_entry_safe(dev, tmp, head, unreg_list) {
/* Some devices call without registering
- * for initialization unwind.
+ * for initialization unwind. Remove those
+ * devices and proceed with the remaining.
*/
if (dev->reg_state == NETREG_UNINITIALIZED) {
pr_debug("unregister_netdevice: device %s/%p never "
"was registered\n", dev->name, dev);
WARN_ON(1);
- return;
+ list_del(&dev->unreg_list);
+ continue;
}
BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -5033,6 +5035,11 @@ int register_netdevice(struct net_device *dev)
rollback_registered(dev);
dev->reg_state = NETREG_UNREGISTERED;
}
+ /*
+ * Prevent userspace races by waiting until the network
+ * device is fully setup before sending notifications.
+ */
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
out:
return ret;
@@ -5595,6 +5602,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Notify protocols, that a new device appeared. */
call_netdevice_notifiers(NETDEV_REGISTER, dev);
+ /*
+ * Prevent userspace races by waiting until the network
+ * device is fully setup before sending notifications.
+ */
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+
synchronize_net();
err = 0;
out:
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33148a56819..794bcb897ff 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1364,15 +1364,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
case NETDEV_UNREGISTER:
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
break;
- case NETDEV_REGISTER:
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
- break;
case NETDEV_UP:
case NETDEV_DOWN:
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
break;
+ case NETDEV_POST_INIT:
+ case NETDEV_REGISTER:
case NETDEV_CHANGE:
case NETDEV_GOING_DOWN:
+ case NETDEV_UNREGISTER_BATCH:
break;
default:
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bfa3e7865a8..93c4e060c91 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -93,7 +93,7 @@ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
/* Pipe buffer operations for a socket. */
-static struct pipe_buf_operations sock_pipe_buf_ops = {
+static const struct pipe_buf_operations sock_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index efbcfdc1279..dad7bc4878e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -408,7 +408,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
dccp_sync_mss(newsk, dst_mtu(dst));
- __inet_hash_nolisten(newsk);
+ __inet_hash_nolisten(newsk, NULL);
__inet_inherit_port(sk, newsk);
return newsk;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6574215a1f5..baf05cf43c2 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -46,7 +46,7 @@ static void dccp_v6_hash(struct sock *sk)
return;
}
local_bh_disable();
- __inet6_hash(sk);
+ __inet6_hash(sk, NULL);
local_bh_enable();
}
}
@@ -644,7 +644,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
- __inet6_hash(newsk);
+ __inet6_hash(newsk, NULL);
__inet_inherit_port(sk, newsk);
return newsk;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 70491d9035e..0c94a1ac294 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -166,7 +166,7 @@ config IP_PNP_DHCP
If unsure, say Y. Note that if you want to use DHCP, a DHCP server
must be operating on your network. Read
- <file:Documentation/filesystems/nfsroot.txt> for details.
+ <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
config IP_PNP_BOOTP
bool "IP: BOOTP support"
@@ -181,7 +181,7 @@ config IP_PNP_BOOTP
does BOOTP itself, providing all necessary information on the kernel
command line, you can say N here. If unsure, say Y. Note that if you
want to use BOOTP, a BOOTP server must be operating on your network.
- Read <file:Documentation/filesystems/nfsroot.txt> for details.
+ Read <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
config IP_PNP_RARP
bool "IP: RARP support"
@@ -194,7 +194,7 @@ config IP_PNP_RARP
older protocol which is being obsoleted by BOOTP and DHCP), say Y
here. Note that if you want to use RARP, a RARP server must be
operating on your network. Read
- <file:Documentation/filesystems/nfsroot.txt> for details.
+ <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
# not yet ready..
# bool ' IP: ARP support' CONFIG_IP_PNP_ARP
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 21e5e32d8c6..2b79377b468 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -351,12 +351,13 @@ static inline u32 inet_sk_port_offset(const struct sock *sk)
inet->inet_dport);
}
-void __inet_hash_nolisten(struct sock *sk)
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct hlist_nulls_head *list;
spinlock_t *lock;
struct inet_ehash_bucket *head;
+ int twrefcnt = 0;
WARN_ON(!sk_unhashed(sk));
@@ -367,8 +368,13 @@ void __inet_hash_nolisten(struct sock *sk)
spin_lock(lock);
__sk_nulls_add_node_rcu(sk, list);
+ if (tw) {
+ WARN_ON(sk->sk_hash != tw->tw_hash);
+ twrefcnt = inet_twsk_unhash(tw);
+ }
spin_unlock(lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
@@ -378,7 +384,7 @@ static void __inet_hash(struct sock *sk)
struct inet_listen_hashbucket *ilb;
if (sk->sk_state != TCP_LISTEN) {
- __inet_hash_nolisten(sk);
+ __inet_hash_nolisten(sk, NULL);
return;
}
@@ -427,7 +433,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u32 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16, struct inet_timewait_sock **),
- void (*hash)(struct sock *sk))
+ int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
const unsigned short snum = inet_sk(sk)->inet_num;
@@ -435,6 +441,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct inet_bind_bucket *tb;
int ret;
struct net *net = sock_net(sk);
+ int twrefcnt = 1;
if (!snum) {
int i, remaining, low, high, port;
@@ -493,13 +500,18 @@ ok:
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
- hash(sk);
+ twrefcnt += hash(sk, tw);
}
+ if (tw)
+ twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
spin_unlock(&head->lock);
if (tw) {
inet_twsk_deschedule(tw, death_row);
- inet_twsk_put(tw);
+ while (twrefcnt) {
+ twrefcnt--;
+ inet_twsk_put(tw);
+ }
}
ret = 0;
@@ -510,7 +522,7 @@ ok:
tb = inet_csk(sk)->icsk_bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
- hash(sk);
+ hash(sk, NULL);
spin_unlock_bh(&head->lock);
return 0;
} else {
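
The twrefcnt bookkeeping in the hunks above follows one rule: inet_twsk_put() must never
be called while an ehash or bhash bucket lock is held, so each unhash helper only reports
how many puts the caller owes once the lock is released, and __inet_hash_connect() settles
them in the while (twrefcnt) loop after spin_unlock(). A minimal user-space sketch of that
pattern (the helper names below are stand-ins, not kernel APIs):

    #include <stdio.h>

    static int tw_refcount = 3;          /* pretend timewait socket refcount */

    /* stand-in for inet_twsk_unhash(): one deferred put for the ehash link */
    static int unhash_ehash(void) { return 1; }

    /* stand-in for inet_twsk_bind_unhash(): one deferred put for the bhash link */
    static int unhash_bhash(void) { return 1; }

    int main(void)
    {
        int twrefcnt = 1;                /* the put the old code issued itself */

        /* ---- bucket locks held: only count, never put ---- */
        twrefcnt += unhash_ehash();
        twrefcnt += unhash_bhash();
        /* ---- bucket locks released ---- */

        while (twrefcnt) {               /* mirrors the loop in the hunk above */
            twrefcnt--;
            tw_refcount--;               /* stands in for inet_twsk_put(tw) */
        }
        printf("timewait refcount now %d\n", tw_refcount);
        return 0;
    }
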
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 0fdf45e4c90..cc94cc2d8b2 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -15,9 +15,13 @@
#include <net/ip.h>
-/*
- * unhash a timewait socket from established hash
- * lock must be hold by caller
+/**
+ * inet_twsk_unhash - unhash a timewait socket from established hash
+ * @tw: timewait socket
+ *
+ * unhash a timewait socket from established hash, if hashed.
+ * ehash lock must be held by caller.
+ * Returns 1 if caller should call inet_twsk_put() after lock release.
*/
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
@@ -26,6 +30,37 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw)
hlist_nulls_del_rcu(&tw->tw_node);
sk_nulls_node_init(&tw->tw_node);
+ /*
+ * We cannot call inet_twsk_put() ourself under lock,
+ * caller must call it for us.
+ */
+ return 1;
+}
+
+/**
+ * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
+ * @tw: timewait socket
+ * @hashinfo: hashinfo pointer
+ *
+ * unhash a timewait socket from bind hash, if hashed.
+ * bind hash lock must be held by caller.
+ * Returns 1 if caller should call inet_twsk_put() after lock release.
+ */
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ struct inet_hashinfo *hashinfo)
+{
+ struct inet_bind_bucket *tb = tw->tw_tb;
+
+ if (!tb)
+ return 0;
+
+ __hlist_del(&tw->tw_bind_node);
+ tw->tw_tb = NULL;
+ inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+ /*
+ * We cannot call inet_twsk_put() ourself under lock,
+ * caller must call it for us.
+ */
return 1;
}
@@ -34,7 +69,6 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo)
{
struct inet_bind_hashbucket *bhead;
- struct inet_bind_bucket *tb;
int refcnt;
/* Unlink from established hashes. */
spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
@@ -46,15 +80,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
/* Disassociate with bind bucket. */
bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
hashinfo->bhash_size)];
+
spin_lock(&bhead->lock);
- tb = tw->tw_tb;
- if (tb) {
- __hlist_del(&tw->tw_bind_node);
- tw->tw_tb = NULL;
- inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
- refcnt++;
- }
+ refcnt += inet_twsk_bind_unhash(tw, hashinfo);
spin_unlock(&bhead->lock);
+
#ifdef SOCK_REFCNT_DEBUG
if (atomic_read(&tw->tw_refcnt) != 1) {
printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
@@ -126,7 +156,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
/*
* Notes :
- * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
+ * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
* - We add one reference for the bhash link
* - We add one reference for the ehash link
* - We want this refcnt update done before allowing other
@@ -136,7 +166,6 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
spin_unlock(lock);
}
-
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
@@ -177,7 +206,6 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
return tw;
}
-
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
/* Returns non-zero if quota exceeded. */
@@ -256,7 +284,6 @@ void inet_twdr_hangman(unsigned long data)
out:
spin_unlock(&twdr->death_lock);
}
-
EXPORT_SYMBOL_GPL(inet_twdr_hangman);
void inet_twdr_twkill_work(struct work_struct *work)
@@ -287,7 +314,6 @@ void inet_twdr_twkill_work(struct work_struct *work)
spin_unlock_bh(&twdr->death_lock);
}
}
-
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
/* These are always called from BH context. See callers in
@@ -307,7 +333,6 @@ void inet_twsk_deschedule(struct inet_timewait_sock *tw,
spin_unlock(&twdr->death_lock);
__inet_twsk_kill(tw, twdr->hashinfo);
}
-
EXPORT_SYMBOL(inet_twsk_deschedule);
void inet_twsk_schedule(struct inet_timewait_sock *tw,
@@ -388,7 +413,6 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw,
mod_timer(&twdr->tw_timer, jiffies + twdr->period);
spin_unlock(&twdr->death_lock);
}
-
EXPORT_SYMBOL_GPL(inet_twsk_schedule);
void inet_twdr_twcal_tick(unsigned long data)
@@ -449,7 +473,6 @@ out:
#endif
spin_unlock(&twdr->death_lock);
}
-
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 4e08b7f2331..10a6a604bf3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1446,7 +1446,7 @@ late_initcall(ip_auto_config);
/*
* Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel
- * command line parameter. See Documentation/filesystems/nfsroot.txt.
+ * command line parameter. See Documentation/filesystems/nfs/nfsroot.txt.
*/
static int __init ic_proto_name(char *name)
{
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index fa2d6b6fc3e..331ead3ebd1 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -14,6 +14,7 @@
#include <net/route.h>
#include <net/ip.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
return err;
}
+static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge &&
+ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+ return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+ if (hooknum == NF_INET_PRE_ROUTING)
+ return IP_DEFRAG_CONNTRACK_IN;
+ else
+ return IP_DEFRAG_CONNTRACK_OUT;
+}
+
static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
#endif
/* Gather fragments. */
if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
- if (nf_ct_ipv4_gather_frags(skb,
- hooknum == NF_INET_PRE_ROUTING ?
- IP_DEFRAG_CONNTRACK_IN :
- IP_DEFRAG_CONNTRACK_OUT))
+ enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+ if (nf_ct_ipv4_gather_frags(skb, user))
return NF_STOLEN;
}
return NF_ACCEPT;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26399ad2a28..66fd80ef247 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+ /* check for timestamp cookie support */
+ memset(&tcp_opt, 0, sizeof(tcp_opt));
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+ if (tcp_opt.saw_tstamp)
+ cookie_check_timestamp(&tcp_opt);
+
ret = NULL;
req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
if (!req)
@@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
ireq->ecn_ok = 0;
+ ireq->snd_wscale = tcp_opt.snd_wscale;
+ ireq->rcv_wscale = tcp_opt.rcv_wscale;
+ ireq->sack_ok = tcp_opt.sack_ok;
+ ireq->wscale_ok = tcp_opt.wscale_ok;
+ ireq->tstamp_ok = tcp_opt.saw_tstamp;
+ req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
/* We throwed the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
}
}
- /* check for timestamp cookie support */
- memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
-
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
-
- ireq->snd_wscale = tcp_opt.snd_wscale;
- ireq->rcv_wscale = tcp_opt.rcv_wscale;
- ireq->sack_ok = tcp_opt.sack_ok;
- ireq->wscale_ok = tcp_opt.wscale_ok;
- ireq->tstamp_ok = tcp_opt.saw_tstamp;
- req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
/* Try to redo what tcp_v4_send_synack did. */
req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c8666b70cde..b0a26bb25e2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2540,11 +2540,6 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
ctd.tcpct_cookie_desired = cvp->cookie_desired;
ctd.tcpct_s_data_desired = cvp->s_data_desired;
- /* Cookie(s) saved, return as nonce */
- if (sizeof(ctd.tcpct_value) < cvp->cookie_pair_size) {
- /* impossible? */
- return -EINVAL;
- }
memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
cvp->cookie_pair_size);
ctd.tcpct_used = cvp->cookie_pair_size;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 57ae96a0422..28e02963249 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2717,6 +2717,35 @@ static void tcp_try_undo_dsack(struct sock *sk)
}
}
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumptions doesn't consider
+ * what will happen if errors occur when sending retransmission for the
+ * second time. ...It could the that such segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * Main reason for all this complexity is the fact that connection dying
+ * time now depends on the validity of the retrans_stamp, in particular,
+ * that successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static int tcp_any_retrans_done(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+
+ if (tp->retrans_out)
+ return 1;
+
+ skb = tcp_write_queue_head(sk);
+ if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+ return 1;
+
+ return 0;
+}
+
/* Undo during fast recovery after partial ACK. */
static int tcp_try_undo_partial(struct sock *sk, int acked)
@@ -2729,7 +2758,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
/* Plain luck! Hole if filled with delayed
* packet, rather than with a retransmit.
*/
- if (tp->retrans_out == 0)
+ if (!tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
@@ -2788,7 +2817,7 @@ static void tcp_try_keep_open(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
int state = TCP_CA_Open;
- if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+ if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
state = TCP_CA_Disorder;
if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2803,7 +2832,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
tcp_verify_left_out(tp);
- if (!tp->frto_counter && tp->retrans_out == 0)
+ if (!tp->frto_counter && !tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
if (flag & FLAG_ECE)
@@ -3698,7 +3727,7 @@ old_ack:
* the fast version below fails.
*/
void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
- u8 **hvpp, int estab, struct dst_entry *dst)
+ u8 **hvpp, int estab)
{
unsigned char *ptr;
struct tcphdr *th = tcp_hdr(skb);
@@ -3737,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
break;
case TCPOPT_WINDOW:
if (opsize == TCPOLEN_WINDOW && th->syn &&
- !estab && sysctl_tcp_window_scaling &&
- !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
+ !estab && sysctl_tcp_window_scaling) {
__u8 snd_wscale = *(__u8 *)ptr;
opt_rx->wscale_ok = 1;
if (snd_wscale > 14) {
@@ -3754,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
case TCPOPT_TIMESTAMP:
if ((opsize == TCPOLEN_TIMESTAMP) &&
((estab && opt_rx->tstamp_ok) ||
- (!estab && sysctl_tcp_timestamps &&
- !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
+ (!estab && sysctl_tcp_timestamps))) {
opt_rx->saw_tstamp = 1;
opt_rx->rcv_tsval = get_unaligned_be32(ptr);
opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3763,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
break;
case TCPOPT_SACK_PERM:
if (opsize == TCPOLEN_SACK_PERM && th->syn &&
- !estab && sysctl_tcp_sack &&
- !dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
+ !estab && sysctl_tcp_sack) {
opt_rx->sack_ok = 1;
tcp_sack_reset(opt_rx);
}
@@ -3849,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
if (tcp_parse_aligned_timestamp(tp, th))
return 1;
}
- tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
+ tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
return 1;
}
@@ -4104,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
- if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
- !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+ if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
int mib_idx;
if (before(seq, tp->rcv_nxt))
@@ -4136,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
tcp_enter_quickack_mode(sk);
- if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
- !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+ if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5399,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
u8 *hash_location;
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
int saved_clamp = tp->rx_opt.mss_clamp;
- tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst);
+ tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
if (th->ack) {
/* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 29002ab26e0..65b8ebfd078 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif
- ireq = inet_rsk(req);
- ireq->loc_addr = daddr;
- ireq->rmt_addr = saddr;
- ireq->no_srccheck = inet_sk(sk)->transparent;
- ireq->opt = tcp_v4_save_options(sk, skb);
-
- dst = inet_csk_route_req(sk, req);
- if(!dst)
- goto drop_and_free;
-
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
+ ireq = inet_rsk(req);
+ ireq->loc_addr = daddr;
+ ireq->rmt_addr = saddr;
+ ireq->no_srccheck = inet_sk(sk)->transparent;
+ ireq->opt = tcp_v4_save_options(sk, skb);
+
if (security_inet_conn_request(sk, skb, req))
- goto drop_and_release;
+ goto drop_and_free;
if (!want_cookie)
TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
*/
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
+ (dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) {
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1464,7 +1461,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
}
#endif
- __inet_hash_nolisten(newsk);
+ __inet_hash_nolisten(newsk, NULL);
__inet_inherit_port(sk, newsk);
return newsk;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 87accec8d09..f206ee5dda8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
int paws_reject = 0;
+ tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
- tmp_opt.tstamp_ok = 1;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
int paws_reject = 0;
- if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) {
- tmp_opt.tstamp_ok = 1;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+ tmp_opt.saw_tstamp = 0;
+ if (th->doff > (sizeof(struct tcphdr)>>2)) {
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 93316a96d82..383ce237640 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_md5sig_key **md5) {
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
- struct dst_entry *dst = __sk_dst_get(sk);
unsigned remaining = MAX_TCP_OPTION_SPACE;
u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
tcp_cookie_size_check(cvp->cookie_desired) :
@@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
opts->mss = tcp_advertise_mss(sk);
remaining -= TCPOLEN_MSS_ALIGNED;
- if (likely(sysctl_tcp_timestamps &&
- !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
- *md5 == NULL)) {
+ if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
opts->options |= OPTION_TS;
opts->tsval = TCP_SKB_CB(skb)->when;
opts->tsecr = tp->rx_opt.ts_recent;
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
- if (likely(sysctl_tcp_window_scaling &&
- !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
+ if (likely(sysctl_tcp_window_scaling)) {
opts->ws = tp->rx_opt.rcv_wscale;
opts->options |= OPTION_WSCALE;
remaining -= TCPOLEN_WSCALE_ALIGNED;
}
- if (likely(sysctl_tcp_sack &&
- !dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
+ if (likely(sysctl_tcp_sack)) {
opts->options |= OPTION_SACK_ADVERTISE;
if (unlikely(!(OPTION_TS & opts->options)))
remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk)
* See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
*/
tp->tcp_header_len = sizeof(struct tcphdr) +
- (sysctl_tcp_timestamps &&
- (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ?
- TCPOLEN_TSTAMP_ALIGNED : 0));
+ (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
#ifdef CONFIG_TCP_MD5SIG
if (tp->af_specific->md5_lookup(sk, sk) != NULL)
@@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk)
tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
&tp->rcv_wnd,
&tp->window_clamp,
- (sysctl_tcp_window_scaling &&
- !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)),
+ sysctl_tcp_window_scaling,
&rcv_wscale);
tp->rx_opt.rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8353a538cd4..8816a20c259 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -132,6 +132,35 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
}
}
+/* This function calculates a "timeout" which is equivalent to the timeout of a
+ * TCP connection after "boundary" unsucessful, exponentially backed-off
+ * retransmissions with an initial RTO of TCP_RTO_MIN.
+ */
+static bool retransmits_timed_out(struct sock *sk,
+ unsigned int boundary)
+{
+ unsigned int timeout, linear_backoff_thresh;
+ unsigned int start_ts;
+
+ if (!inet_csk(sk)->icsk_retransmits)
+ return false;
+
+ if (unlikely(!tcp_sk(sk)->retrans_stamp))
+ start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
+ else
+ start_ts = tcp_sk(sk)->retrans_stamp;
+
+ linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+
+ if (boundary <= linear_backoff_thresh)
+ timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+ else
+ timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+ (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+
+ return (tcp_time_stamp - start_ts) >= timeout;
+}
+
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
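
The retransmits_timed_out() helper added above converts a retransmission count into
elapsed time: the first ilog2(TCP_RTO_MAX/TCP_RTO_MIN) attempts back off exponentially
from TCP_RTO_MIN, and every attempt past that threshold adds a full TCP_RTO_MAX. A worked
user-space version of the same arithmetic, assuming the usual in-tree values
TCP_RTO_MIN = 200 ms and TCP_RTO_MAX = 120 s:

    #include <stdio.h>

    #define RTO_MIN_MS 200u              /* HZ/5 expressed in milliseconds */
    #define RTO_MAX_MS 120000u           /* 120*HZ expressed in milliseconds */

    static unsigned int timeout_ms(unsigned int boundary)
    {
        unsigned int linear_backoff_thresh = 9;   /* ilog2(120000/200) = ilog2(600) */

        if (boundary <= linear_backoff_thresh)
            return ((2u << boundary) - 1) * RTO_MIN_MS;
        return ((2u << linear_backoff_thresh) - 1) * RTO_MIN_MS +
               (boundary - linear_backoff_thresh) * RTO_MAX_MS;
    }

    int main(void)
    {
        /* boundary = 15 matches the default tcp_retries2 */
        printf("%u ms\n", timeout_ms(15));   /* prints 924600 */
        return 0;
    }

With the default tcp_retries2 of 15 this yields 924600 ms, i.e. a connection whose
retransmissions never succeed is declared dead roughly 15.4 minutes after retrans_stamp.
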
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f9534846ca..f0126fdd7e0 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
rand = (rand | 1) * (udptable->mask + 1);
- for (last = first + udptable->mask + 1;
- first != last;
- first++) {
+ last = first + udptable->mask + 1;
+ do {
hslot = udp_hashslot(udptable, net, first);
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
@@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
snum += rand;
} while (snum != first);
spin_unlock_bh(&hslot->lock);
- }
+ } while (++first != last);
goto fail;
} else {
hslot = udp_hashslot(udptable, net, snum);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index c813e294ec0..633a6c26613 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -22,9 +22,10 @@
#include <net/inet6_hashtables.h>
#include <net/ip.h>
-void __inet6_hash(struct sock *sk)
+int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+ int twrefcnt = 0;
WARN_ON(!sk_unhashed(sk));
@@ -45,10 +46,15 @@ void __inet6_hash(struct sock *sk)
lock = inet_ehash_lockp(hashinfo, hash);
spin_lock(lock);
__sk_nulls_add_node_rcu(sk, list);
+ if (tw) {
+ WARN_ON(sk->sk_hash != tw->tw_hash);
+ twrefcnt = inet_twsk_unhash(tw);
+ }
spin_unlock(lock);
}
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ return twrefcnt;
}
EXPORT_SYMBOL(__inet6_hash);
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 5f2ec208a8c..0956ebabbff 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -20,6 +20,7 @@
#include <net/ipv6.h>
#include <net/inet_frag.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
@@ -187,6 +188,21 @@ out:
return nf_conntrack_confirm(skb);
}
+static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge &&
+ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+ return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+ if (hooknum == NF_INET_PRE_ROUTING)
+ return IP6_DEFRAG_CONNTRACK_IN;
+ else
+ return IP6_DEFRAG_CONNTRACK_OUT;
+
+}
+
static unsigned int ipv6_defrag(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
@@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
if (skb->nfct)
return NF_ACCEPT;
- reasm = nf_ct_frag6_gather(skb);
-
+ reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
/* queued */
if (reasm == NULL)
return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e0b9424fa1b..312c20adc83 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -168,13 +168,14 @@ out:
/* Creation primitives. */
static __inline__ struct nf_ct_frag6_queue *
-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
+fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
unsigned int hash;
arg.id = id;
+ arg.user = user;
arg.src = src;
arg.dst = dst;
@@ -559,7 +560,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
return 0;
}
-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
{
struct sk_buff *clone;
struct net_device *dev = skb->dev;
@@ -605,7 +606,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
nf_ct_frag6_evictor();
- fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
+ fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
if (fq == NULL) {
pr_debug("Can't find and can't create new queue\n");
goto ret_orig;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4d98549a686..3b3a9560712 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -72,6 +72,7 @@ struct frag_queue
struct inet_frag_queue q;
__be32 id; /* fragment id */
+ u32 user;
struct in6_addr saddr;
struct in6_addr daddr;
@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
struct ip6_create_arg *arg = a;
fq = container_of(q, struct frag_queue, q);
- return (fq->id == arg->id &&
+ return (fq->id == arg->id && fq->user == arg->user &&
ipv6_addr_equal(&fq->saddr, arg->src) &&
ipv6_addr_equal(&fq->daddr, arg->dst));
}
@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
struct ip6_create_arg *arg = a;
fq->id = arg->id;
+ fq->user = arg->user;
ipv6_addr_copy(&fq->saddr, arg->src);
ipv6_addr_copy(&fq->daddr, arg->dst);
}
@@ -243,6 +245,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
unsigned int hash;
arg.id = id;
+ arg.user = IP6_DEFRAG_LOCAL_DELIVER;
arg.src = src;
arg.dst = dst;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5b9af508b8f..7208a06576c 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+ /* check for timestamp cookie support */
+ memset(&tcp_opt, 0, sizeof(tcp_opt));
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+ if (tcp_opt.saw_tstamp)
+ cookie_check_timestamp(&tcp_opt);
+
ret = NULL;
req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
if (!req)
@@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
req->expires = 0UL;
req->retrans = 0;
ireq->ecn_ok = 0;
+ ireq->snd_wscale = tcp_opt.snd_wscale;
+ ireq->rcv_wscale = tcp_opt.rcv_wscale;
+ ireq->sack_ok = tcp_opt.sack_ok;
+ ireq->wscale_ok = tcp_opt.wscale_ok;
+ ireq->tstamp_ok = tcp_opt.saw_tstamp;
+ req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
@@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
goto out_free;
}
- /* check for timestamp cookie support */
- memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
-
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
-
- req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
- ireq->snd_wscale = tcp_opt.snd_wscale;
- ireq->rcv_wscale = tcp_opt.rcv_wscale;
- ireq->sack_ok = tcp_opt.sack_ok;
- ireq->wscale_ok = tcp_opt.wscale_ok;
- ireq->tstamp_ok = tcp_opt.saw_tstamp;
-
req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index aadd7cef73b..febfd595a40 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -96,7 +96,7 @@ static void tcp_v6_hash(struct sock *sk)
return;
}
local_bh_disable();
- __inet6_hash(sk);
+ __inet6_hash(sk, NULL);
local_bh_enable();
}
}
@@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
struct inet6_request_sock *treq;
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
@@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1496,7 +1495,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
}
#endif
- __inet6_hash(newsk);
+ __inet6_hash(newsk, NULL);
__inet_inherit_port(sk, newsk);
return newsk;
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index b001c361ad3..4300df35d37 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -249,6 +249,7 @@
#include <linux/poll.h>
#include <linux/capability.h>
#include <linux/ctype.h> /* isspace() */
+#include <linux/string.h> /* skip_spaces() */
#include <asm/uaccess.h>
#include <linux/init.h>
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 7dea882dbb7..156020d138b 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -76,9 +76,8 @@ irnet_ctrl_write(irnet_socket * ap,
/* Look at the next command */
start = next;
- /* Scrap whitespaces before the command */
- while(isspace(*start))
- start++;
+ /* Scrap whitespaces before the command */
+ start = skip_spaces(start);
/* ',' is our command separator */
next = strchr(start, ',');
@@ -133,8 +132,7 @@ irnet_ctrl_write(irnet_socket * ap,
char * endp;
/* Scrap whitespaces before the command */
- while(isspace(*begp))
- begp++;
+ begp = skip_spaces(begp);
/* Convert argument to a number (last arg is the base) */
addr = simple_strtoul(begp, &endp, 16);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 1e428863574..c18286a2167 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -221,7 +221,7 @@ static int afiucv_pm_restore_thaw(struct device *dev)
return 0;
}
-static struct dev_pm_ops afiucv_pm_ops = {
+static const struct dev_pm_ops afiucv_pm_ops = {
.prepare = afiucv_pm_prepare,
.complete = afiucv_pm_complete,
.freeze = afiucv_pm_freeze,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3b1f5f5f8de..fd8b28361a6 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -93,7 +93,7 @@ static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);
-static struct dev_pm_ops iucv_pm_ops = {
+static const struct dev_pm_ops iucv_pm_ops = {
.prepare = iucv_pm_prepare,
.complete = iucv_pm_complete,
.freeze = iucv_pm_freeze,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 84209fbbeb1..76fa6fef647 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1193,6 +1193,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
x->aalg->alg_key_len = key->sadb_key_bits;
memcpy(x->aalg->alg_key, key+1, keysize);
}
+ x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits;
x->props.aalgo = sa->sadb_sa_auth;
/* x->algo.flags = sa->sadb_sa_flags; */
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 93ee1fd5c08..6dc3579c0ac 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -354,7 +354,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->rx_packets = sta->rx_packets;
sinfo->tx_packets = sta->tx_packets;
- if (sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
+ if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
+ (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
sinfo->filled |= STATION_INFO_SIGNAL;
sinfo->signal = (s8)sta->last_signal;
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 419f186cfcf..91dc8636d64 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -746,6 +746,7 @@ struct ieee80211_local {
unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
bool pspolling;
+ bool scan_ps_enabled;
/*
* PS can only be enabled when we have exactly one managed
* interface (and monitors) in PS, this then points there.
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c0fe46493f7..6a433142959 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -427,7 +427,7 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
char *addr5, char *addr6)
{
int aelen = 0;
- memset(meshhdr, 0, sizeof(meshhdr));
+ memset(meshhdr, 0, sizeof(*meshhdr));
meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
sdata->u.mesh.mesh_seqnum++;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 31e10254186..85562c59d7d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -188,8 +188,9 @@ struct mesh_rmc {
*/
#define MESH_PREQ_MIN_INT 10
#define MESH_DIAM_TRAVERSAL_TIME 50
-/* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their
- * expiration
+/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before
+ * timing out. This way it will remain ACTIVE and no data frames will be
+ * unnecesarily held in the pending queue.
*/
#define MESH_PATH_REFRESH_TIME 1000
#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 833b2f3670c..d28acb6b1f8 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -937,7 +937,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
if (mpath->flags & MESH_PATH_ACTIVE) {
if (time_after(jiffies,
- mpath->exp_time +
+ mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
!memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) &&
!(mpath->flags & MESH_PATH_RESOLVING) &&
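
The sign flip is the whole fix here: with `+`, a refresh only fired once the path had
already been expired for path_refresh_time; with `-`, discovery starts path_refresh_time
before exp_time, so the path stays ACTIVE and frames are not queued while it is rebuilt
(matching the reworded MESH_PATH_REFRESH_TIME comment above). A small sketch of the
jiffies comparison, with time_after() simplified for illustration:

    #include <stdio.h>

    /* simplified version of the kernel's time_after(a, b) */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    int main(void)
    {
        unsigned long jiffies  = 10000;
        unsigned long exp_time = 10100;   /* path expires 100 ticks from now */
        unsigned long refresh  = 250;     /* stand-in for path_refresh_time */

        /* old test: false until the path has been expired for 250 ticks */
        printf("exp_time + refresh: %d\n", time_after(jiffies, exp_time + refresh));

        /* new test: true during the last 250 ticks of the path's lifetime */
        printf("exp_time - refresh: %d\n", time_after(jiffies, exp_time - refresh));
        return 0;
    }
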
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 6dc7b5ad9a4..d8d50fb5e82 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1083,8 +1083,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_set_wmm_default(sdata);
- ieee80211_recalc_idle(local);
-
/* channel(_type) changes are handled by ieee80211_hw_config */
local->oper_channel_type = NL80211_CHAN_NO_HT;
@@ -1370,6 +1368,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
if (!wk) {
ieee80211_set_disassoc(sdata, true);
+ ieee80211_recalc_idle(sdata->local);
} else {
list_del(&wk->list);
kfree(wk);
@@ -1403,6 +1402,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->dev->name, mgmt->sa, reason_code);
ieee80211_set_disassoc(sdata, false);
+ ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DISASSOC;
}
@@ -2117,6 +2117,7 @@ static void ieee80211_sta_work(struct work_struct *work)
" after %dms, disconnecting.\n",
bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
ieee80211_set_disassoc(sdata, true);
+ ieee80211_recalc_idle(local);
mutex_unlock(&ifmgd->mtx);
/*
* must be outside lock due to cfg80211,
@@ -2560,6 +2561,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
IEEE80211_STYPE_DEAUTH, req->reason_code,
cookie);
+ ieee80211_recalc_idle(sdata->local);
+
return 0;
}
@@ -2592,5 +2595,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
IEEE80211_STYPE_DISASSOC, req->reason_code,
cookie);
+
+ ieee80211_recalc_idle(sdata->local);
+
return 0;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f237df40837..9f2807aeaf5 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1712,7 +1712,6 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
mpp_path_add(proxied_addr, mpp_addr, sdata);
} else {
spin_lock_bh(&mppath->state_lock);
- mppath->exp_time = jiffies;
if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
spin_unlock_bh(&mppath->state_lock);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 4cf387c944b..f1a4c716030 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -227,7 +227,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
- bool ps = false;
+
+ local->scan_ps_enabled = false;
/* FIXME: what to do when local->pspolling is true? */
@@ -235,12 +236,13 @@ static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
cancel_work_sync(&local->dynamic_ps_enable_work);
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- ps = true;
+ local->scan_ps_enabled = true;
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ if (!(local->scan_ps_enabled) ||
+ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
@@ -261,7 +263,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
if (!local->ps_sdata)
ieee80211_send_nullfunc(local, sdata, 0);
- else {
+ else if (local->scan_ps_enabled) {
/*
* In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
* will send a nullfunc frame with the powersave bit set
@@ -277,6 +279,16 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
*/
local->hw.conf.flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ } else if (local->hw.conf.dynamic_ps_timeout > 0) {
+ /*
+ * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
+ * had been running before leaving the operating channel,
+ * restart the timer now and send a nullfunc frame to inform
+ * the AP that we are awake.
+ */
+ ieee80211_send_nullfunc(local, sdata, 0);
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d09f78bb244..78a6e924c7e 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -579,7 +579,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
if (elen > left)
break;
- if (calc_crc && id < 64 && (filter & BIT(id)))
+ if (calc_crc && id < 64 && (filter & (1ULL << id)))
crc = crc32_be(crc, pos - 2, elen + 2);
switch (id) {
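
In ieee802_11_parse_elems_crc() the filter argument is a 64-bit bitmap of element IDs
(hence the id < 64 check), but BIT(id) expands to 1UL << id; on a 32-bit kernel that
shift is only 32 bits wide, so it is undefined for id >= 32 and, in the common
wrap-around case, tests the wrong bit. A user-space illustration, emulating the 32-bit
unsigned long case with uint32_t:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t filter = 1ULL << 42;     /* caller wants element id 42 in the CRC */
        int id = 42;

        /* old-style 32-bit-wide bit: wraps to bit 10 and never matches id 42 */
        uint32_t narrow = (uint32_t)1 << (id % 32);
        printf("BIT(id), 32-bit UL: %d\n", (filter & narrow) != 0);

        /* the fixed test from the hunk above */
        printf("1ULL << id        : %d\n", (filter & (1ULL << id)) != 0);
        return 0;
    }
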
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b95699f0054..847ffca4018 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1366,6 +1366,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
== sysctl_ip_vs_sync_threshold[0])) ||
((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
+ (cp->state == IP_VS_TCP_S_CLOSE) ||
(cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
(cp->state == IP_VS_TCP_S_TIME_WAIT)))))
ip_vs_sync_conn(cp);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e55a6861d26..6bde12da2fe 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2714,6 +2714,8 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
return -EINVAL;
+ memset(usvc, 0, sizeof(*usvc));
+
usvc->af = nla_get_u16(nla_af);
#ifdef CONFIG_IP_VS_IPV6
if (usvc->af != AF_INET && usvc->af != AF_INET6)
@@ -2901,6 +2903,8 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
if (!(nla_addr && nla_port))
return -EINVAL;
+ memset(udest, 0, sizeof(*udest));
+
nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
udest->port = nla_get_u16(nla_port);
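
Both parse helpers above fill usvc/udest field by field from netlink attributes, and several attributes are optional; without the added memset() the untouched fields keep whatever happened to be on the caller's stack. A minimal sketch of the pattern, zeroing a config struct before populating only the fields that were actually supplied (struct and parser are illustrative, not the IPVS ones):

    #include <string.h>
    #include <stdio.h>

    struct svc_conf {
            unsigned short af;
            unsigned short port;
            unsigned int   fwmark;   /* optional: may never be set */
    };

    static int parse_conf(struct svc_conf *c, int have_fwmark)
    {
            memset(c, 0, sizeof(*c));   /* optional fields default to 0 */
            c->af = 2;
            c->port = 80;
            if (have_fwmark)
                    c->fwmark = 1;
            return 0;
    }

    int main(void)
    {
            struct svc_conf c;

            parse_conf(&c, 0);
            printf("fwmark=%u\n", c.fwmark);   /* 0, not stack garbage */
            return 0;
    }
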
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index eb0ceb84652..fc70a49c0af 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -482,8 +482,7 @@ static ssize_t recent_old_proc_write(struct file *file,
if (copy_from_user(buf, input, size))
return -EFAULT;
- while (isspace(*c))
- c++;
+ c = skip_spaces(c);
if (size - (c - buf) < 5)
return c - buf;
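
skip_spaces() is the lib/string.c helper this call site was converted to; as a rough sketch it is just the open-coded loop factored out, shown here as a freestanding function for illustration:

    #include <ctype.h>

    /* Roughly what the helper does: advance past leading whitespace
     * and return a pointer to the first non-space character. */
    char *skip_spaces_sketch(const char *str)
    {
            while (isspace((unsigned char)*str))
                    ++str;
            return (char *)str;
    }
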
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 020562164b5..e0516a22be2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -415,7 +415,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct net_device *dev;
__be16 proto = 0;
int err;
@@ -437,6 +437,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
*/
saddr->spkt_device[13] = 0;
+retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
err = -ENODEV;
@@ -456,58 +457,48 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
if (len > dev->mtu + dev->hard_header_len)
goto out_unlock;
- err = -ENOBUFS;
- skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
-
- /*
- * If the write buffer is full, then tough. At this level the user
- * gets to deal with the problem - do your own algorithmic backoffs.
- * That's far more flexible.
- */
-
- if (skb == NULL)
- goto out_unlock;
-
- /*
- * Fill it in
- */
-
- /* FIXME: Save some space for broken drivers that write a
- * hard header at transmission time by themselves. PPP is the
- * notable one here. This should really be fixed at the driver level.
- */
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
- skb_reset_network_header(skb);
-
- /* Try to align data part correctly */
- if (dev->header_ops) {
- skb->data -= dev->hard_header_len;
- skb->tail -= dev->hard_header_len;
- if (len < dev->hard_header_len)
- skb_reset_network_header(skb);
+ if (!skb) {
+ size_t reserved = LL_RESERVED_SPACE(dev);
+ unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
+
+ rcu_read_unlock();
+ skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
+ if (skb == NULL)
+ return -ENOBUFS;
+ /* FIXME: Save some space for broken drivers that write a hard
+ * header at transmission time by themselves. PPP is the notable
+ * one here. This should really be fixed at the driver level.
+ */
+ skb_reserve(skb, reserved);
+ skb_reset_network_header(skb);
+
+ /* Try to align data part correctly */
+ if (hhlen) {
+ skb->data -= hhlen;
+ skb->tail -= hhlen;
+ if (len < hhlen)
+ skb_reset_network_header(skb);
+ }
+ err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ if (err)
+ goto out_free;
+ goto retry;
}
- /* Returns -EFAULT on error */
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- if (err)
- goto out_free;
-
- /*
- * Now send it
- */
dev_queue_xmit(skb);
rcu_read_unlock();
return len;
-out_free:
- kfree_skb(skb);
out_unlock:
rcu_read_unlock();
+out_free:
+ kfree_skb(skb);
return err;
}
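
The packet_sendmsg_spkt() rewrite exists because sock_wmalloc(..., GFP_KERNEL) may sleep, which is not allowed inside the rcu_read_lock()/rcu_read_unlock() section protecting the device lookup; the patch drops the RCU lock, allocates, and jumps back to redo the lookup with the buffer already in hand. A simplified sketch of that "drop the lock, allocate, retry the lookup" shape, using a plain mutex in place of RCU (everything here is illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *lookup_device(const char *name) { (void)name; return (void *)1; }

    static int send_on(const char *name, size_t len)
    {
            char *buf = NULL;
            void *dev;

    retry:
            pthread_mutex_lock(&lookup_lock);
            dev = lookup_device(name);
            if (!dev) {
                    pthread_mutex_unlock(&lookup_lock);
                    free(buf);
                    return -1;                      /* -ENODEV */
            }
            if (!buf) {
                    /* cannot block while the lock is held: drop it,
                     * allocate, then redo the lookup from scratch */
                    pthread_mutex_unlock(&lookup_lock);
                    buf = malloc(len);
                    if (!buf)
                            return -1;              /* -ENOBUFS */
                    goto retry;
            }
            /* ... fill buf and hand it to dev here ... */
            pthread_mutex_unlock(&lookup_lock);
            free(buf);
            return 0;
    }

    int main(void) { return send_on("eth0", 64); }
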
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 536ebe5d3f6..3b899236104 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -182,8 +182,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
ic = conn->c_transport_data;
dev_addr = &ic->i_cm_id->route.addr.dev_addr;
- ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
- ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
+ rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
+ rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index db224f7c293..b28fa8525b2 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -184,8 +184,8 @@ static int rds_iw_conn_info_visitor(struct rds_connection *conn,
ic = conn->c_transport_data;
dev_addr = &ic->i_cm_id->route.addr.dev_addr;
- ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
- ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
+ rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
+ rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 448e5a0fcc2..c218e07e5ca 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -579,6 +579,8 @@ static ssize_t rfkill_name_show(struct device *dev,
static const char *rfkill_get_type_str(enum rfkill_type type)
{
+ BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);
+
switch (type) {
case RFKILL_TYPE_WLAN:
return "wlan";
@@ -597,8 +599,6 @@ static const char *rfkill_get_type_str(enum rfkill_type type)
default:
BUG();
}
-
- BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);
}
static ssize_t rfkill_type_show(struct device *dev,
diff --git a/net/socket.c b/net/socket.c
index b94c3dd7101..769c386bd42 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -312,18 +312,6 @@ static struct file_system_type sock_fs_type = {
.kill_sb = kill_anon_super,
};
-static int sockfs_delete_dentry(struct dentry *dentry)
-{
- /*
- * At creation time, we pretended this dentry was hashed
- * (by clearing DCACHE_UNHASHED bit in d_flags)
- * At delete time, we restore the truth : not hashed.
- * (so that dput() can proceed correctly)
- */
- dentry->d_flags |= DCACHE_UNHASHED;
- return 0;
-}
-
/*
* sockfs_dname() is called from d_path().
*/
@@ -334,7 +322,6 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
}
static const struct dentry_operations sockfs_dentry_operations = {
- .d_delete = sockfs_delete_dentry,
.d_dname = sockfs_dname,
};
@@ -355,68 +342,55 @@ static const struct dentry_operations sockfs_dentry_operations = {
* but we take care of internal coherence yet.
*/
-static int sock_alloc_fd(struct file **filep, int flags)
+static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
{
+ struct qstr name = { .name = "" };
+ struct path path;
+ struct file *file;
int fd;
fd = get_unused_fd_flags(flags);
- if (likely(fd >= 0)) {
- struct file *file = get_empty_filp();
-
- *filep = file;
- if (unlikely(!file)) {
- put_unused_fd(fd);
- return -ENFILE;
- }
- } else
- *filep = NULL;
- return fd;
-}
-
-static int sock_attach_fd(struct socket *sock, struct file *file, int flags)
-{
- struct dentry *dentry;
- struct qstr name = { .name = "" };
+ if (unlikely(fd < 0))
+ return fd;
- dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
- if (unlikely(!dentry))
+ path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
+ if (unlikely(!path.dentry)) {
+ put_unused_fd(fd);
return -ENOMEM;
+ }
+ path.mnt = mntget(sock_mnt);
- dentry->d_op = &sockfs_dentry_operations;
- /*
- * We dont want to push this dentry into global dentry hash table.
- * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
- * This permits a working /proc/$pid/fd/XXX on sockets
- */
- dentry->d_flags &= ~DCACHE_UNHASHED;
- d_instantiate(dentry, SOCK_INODE(sock));
+ path.dentry->d_op = &sockfs_dentry_operations;
+ d_instantiate(path.dentry, SOCK_INODE(sock));
+ SOCK_INODE(sock)->i_fop = &socket_file_ops;
- sock->file = file;
- init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE,
+ file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
- SOCK_INODE(sock)->i_fop = &socket_file_ops;
+ if (unlikely(!file)) {
+ /* drop dentry, keep inode */
+ atomic_inc(&path.dentry->d_inode->i_count);
+ path_put(&path);
+ put_unused_fd(fd);
+ return -ENFILE;
+ }
+
+ sock->file = file;
file->f_flags = O_RDWR | (flags & O_NONBLOCK);
file->f_pos = 0;
file->private_data = sock;
- return 0;
+ *f = file;
+ return fd;
}
int sock_map_fd(struct socket *sock, int flags)
{
struct file *newfile;
- int fd = sock_alloc_fd(&newfile, flags);
+ int fd = sock_alloc_file(sock, &newfile, flags);
- if (likely(fd >= 0)) {
- int err = sock_attach_fd(sock, newfile, flags);
-
- if (unlikely(err < 0)) {
- put_filp(newfile);
- put_unused_fd(fd);
- return err;
- }
+ if (likely(fd >= 0))
fd_install(fd, newfile);
- }
+
return fd;
}
@@ -1390,29 +1364,19 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
if (err < 0)
goto out_release_both;
- fd1 = sock_alloc_fd(&newfile1, flags & O_CLOEXEC);
+ fd1 = sock_alloc_file(sock1, &newfile1, flags);
if (unlikely(fd1 < 0)) {
err = fd1;
goto out_release_both;
}
- fd2 = sock_alloc_fd(&newfile2, flags & O_CLOEXEC);
+ fd2 = sock_alloc_file(sock2, &newfile2, flags);
if (unlikely(fd2 < 0)) {
err = fd2;
- put_filp(newfile1);
- put_unused_fd(fd1);
- goto out_release_both;
- }
-
- err = sock_attach_fd(sock1, newfile1, flags & O_NONBLOCK);
- if (unlikely(err < 0)) {
- goto out_fd2;
- }
-
- err = sock_attach_fd(sock2, newfile2, flags & O_NONBLOCK);
- if (unlikely(err < 0)) {
fput(newfile1);
- goto out_fd1;
+ put_unused_fd(fd1);
+ sock_release(sock2);
+ goto out;
}
audit_fd_pair(fd1, fd2);
@@ -1438,16 +1402,6 @@ out_release_1:
sock_release(sock1);
out:
return err;
-
-out_fd2:
- put_filp(newfile1);
- sock_release(sock1);
-out_fd1:
- put_filp(newfile2);
- sock_release(sock2);
- put_unused_fd(fd1);
- put_unused_fd(fd2);
- goto out;
}
/*
@@ -1551,17 +1505,13 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
*/
__module_get(newsock->ops->owner);
- newfd = sock_alloc_fd(&newfile, flags & O_CLOEXEC);
+ newfd = sock_alloc_file(newsock, &newfile, flags);
if (unlikely(newfd < 0)) {
err = newfd;
sock_release(newsock);
goto out_put;
}
- err = sock_attach_fd(newsock, newfile, flags & O_NONBLOCK);
- if (err < 0)
- goto out_fd_simple;
-
err = security_socket_accept(sock, newsock);
if (err)
goto out_fd;
@@ -1591,11 +1541,6 @@ out_put:
fput_light(sock->file, fput_needed);
out:
return err;
-out_fd_simple:
- sock_release(newsock);
- put_filp(newfile);
- put_unused_fd(newfd);
- goto out_put;
out_fd:
fput(newfile);
put_unused_fd(newfd);
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index c7450c8f0a7..6dcdd251781 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -55,16 +55,8 @@ static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
/*
* RFC 4291, Section 2.2.1
- *
- * To keep the result as short as possible, especially
- * since we don't shorthand, we don't want leading zeros
- * in each halfword, so avoid %pI6.
*/
- return snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x",
- ntohs(addr->s6_addr16[0]), ntohs(addr->s6_addr16[1]),
- ntohs(addr->s6_addr16[2]), ntohs(addr->s6_addr16[3]),
- ntohs(addr->s6_addr16[4]), ntohs(addr->s6_addr16[5]),
- ntohs(addr->s6_addr16[6]), ntohs(addr->s6_addr16[7]));
+ return snprintf(buf, buflen, "%pI6c", addr);
}
static size_t rpc_ntop6(const struct sockaddr *sap,
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 7535a7bed2f..f394fc190a4 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -123,16 +123,19 @@ rpcauth_unhash_cred_locked(struct rpc_cred *cred)
clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
}
-static void
+static int
rpcauth_unhash_cred(struct rpc_cred *cred)
{
spinlock_t *cache_lock;
+ int ret;
cache_lock = &cred->cr_auth->au_credcache->lock;
spin_lock(cache_lock);
- if (atomic_read(&cred->cr_count) == 0)
+ ret = atomic_read(&cred->cr_count) == 0;
+ if (ret)
rpcauth_unhash_cred_locked(cred);
spin_unlock(cache_lock);
+ return ret;
}
/*
@@ -446,31 +449,35 @@ void
put_rpccred(struct rpc_cred *cred)
{
/* Fast path for unhashed credentials */
- if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
- goto need_lock;
-
- if (!atomic_dec_and_test(&cred->cr_count))
+ if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
+ if (atomic_dec_and_test(&cred->cr_count))
+ cred->cr_ops->crdestroy(cred);
return;
- goto out_destroy;
-need_lock:
+ }
+
if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock))
return;
if (!list_empty(&cred->cr_lru)) {
number_cred_unused--;
list_del_init(&cred->cr_lru);
}
- if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
- rpcauth_unhash_cred(cred);
if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
- cred->cr_expire = jiffies;
- list_add_tail(&cred->cr_lru, &cred_unused);
- number_cred_unused++;
- spin_unlock(&rpc_credcache_lock);
- return;
+ if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
+ cred->cr_expire = jiffies;
+ list_add_tail(&cred->cr_lru, &cred_unused);
+ number_cred_unused++;
+ goto out_nodestroy;
+ }
+ if (!rpcauth_unhash_cred(cred)) {
+ /* We were hashed and someone looked us up... */
+ goto out_nodestroy;
+ }
}
spin_unlock(&rpc_credcache_lock);
-out_destroy:
cred->cr_ops->crdestroy(cred);
+ return;
+out_nodestroy:
+ spin_unlock(&rpc_credcache_lock);
}
EXPORT_SYMBOL_GPL(put_rpccred);
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index fc6a43ccd95..3c3c50f38a1 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -304,7 +304,7 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
* to that upcall instead of adding the new upcall.
*/
static inline struct gss_upcall_msg *
-gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
+gss_add_msg(struct gss_upcall_msg *gss_msg)
{
struct rpc_inode *rpci = gss_msg->inode;
struct inode *inode = &rpci->vfs_inode;
@@ -445,7 +445,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
if (IS_ERR(gss_new))
return gss_new;
- gss_msg = gss_add_msg(gss_auth, gss_new);
+ gss_msg = gss_add_msg(gss_new);
if (gss_msg == gss_new) {
struct inode *inode = &gss_new->inode->vfs_inode;
int res = rpc_queue_upcall(inode, &gss_new->msg);
@@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task)
dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
cred->cr_uid);
gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
- if (IS_ERR(gss_msg) == -EAGAIN) {
+ if (PTR_ERR(gss_msg) == -EAGAIN) {
/* XXX: warning on the first, under the assumption we
* shouldn't normally hit this case on a refresh. */
warn_gssd();
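
The auth_gss.c fix is worth calling out: IS_ERR() only returns 0 or 1, so IS_ERR(gss_msg) == -EAGAIN could never be true and the warn_gssd() path was dead code; PTR_ERR() is what recovers the encoded errno. A tiny sketch of the ERR_PTR convention, with simplified stand-ins for the kernel macros:

    #include <stdio.h>

    #define MAX_ERRNO 4095
    /* simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR */
    static void *err_ptr(long err)      { return (void *)err; }
    static int   is_err(const void *p)  { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }
    static long  ptr_err(const void *p) { return (long)p; }

    int main(void)
    {
            void *msg = err_ptr(-11);                   /* -EAGAIN */

            printf("IS_ERR  -> %d\n", is_err(msg));     /* 1, never -11 */
            printf("PTR_ERR -> %ld\n", ptr_err(msg));   /* -11 */
            if (ptr_err(msg) == -11)
                    printf("retry later\n");
            return 0;
    }
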
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 38829e20500..154034b675b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -79,7 +79,7 @@ static void call_connect_status(struct rpc_task *task);
static __be32 *rpc_encode_header(struct rpc_task *task);
static __be32 *rpc_verify_header(struct rpc_task *task);
-static int rpc_ping(struct rpc_clnt *clnt, int flags);
+static int rpc_ping(struct rpc_clnt *clnt);
static void rpc_register_client(struct rpc_clnt *clnt)
{
@@ -340,7 +340,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
return clnt;
if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
- int err = rpc_ping(clnt, RPC_TASK_SOFT);
+ int err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
return ERR_PTR(err);
@@ -528,7 +528,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
clnt->cl_prog = program->number;
clnt->cl_vers = version->number;
clnt->cl_stats = program->stats;
- err = rpc_ping(clnt, RPC_TASK_SOFT);
+ err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
clnt = ERR_PTR(err);
@@ -1060,7 +1060,7 @@ call_bind_status(struct rpc_task *task)
goto retry_timeout;
case -EPFNOSUPPORT:
/* server doesn't support any rpcbind version we know of */
- dprintk("RPC: %5u remote rpcbind service unavailable\n",
+ dprintk("RPC: %5u unrecognized remote rpcbind service\n",
task->tk_pid);
break;
case -EPROTONOSUPPORT:
@@ -1069,6 +1069,21 @@ call_bind_status(struct rpc_task *task)
task->tk_status = 0;
task->tk_action = call_bind;
return;
+ case -ECONNREFUSED: /* connection problems */
+ case -ECONNRESET:
+ case -ENOTCONN:
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -EPIPE:
+ dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
+ task->tk_pid, task->tk_status);
+ if (!RPC_IS_SOFTCONN(task)) {
+ rpc_delay(task, 5*HZ);
+ goto retry_timeout;
+ }
+ status = task->tk_status;
+ break;
default:
dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
task->tk_pid, -task->tk_status);
@@ -1180,11 +1195,25 @@ static void
call_transmit_status(struct rpc_task *task)
{
task->tk_action = call_status;
+
+ /*
+ * Common case: success. Force the compiler to put this
+ * test first.
+ */
+ if (task->tk_status == 0) {
+ xprt_end_transmit(task);
+ rpc_task_force_reencode(task);
+ return;
+ }
+
switch (task->tk_status) {
case -EAGAIN:
break;
default:
+ dprint_status(task);
xprt_end_transmit(task);
+ rpc_task_force_reencode(task);
+ break;
/*
* Special cases: if we've been waiting on the
* socket's write_space() callback, or if the
@@ -1192,11 +1221,16 @@ call_transmit_status(struct rpc_task *task)
* then hold onto the transport lock.
*/
case -ECONNREFUSED:
- case -ECONNRESET:
- case -ENOTCONN:
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
+ if (RPC_IS_SOFTCONN(task)) {
+ xprt_end_transmit(task);
+ rpc_exit(task, task->tk_status);
+ break;
+ }
+ case -ECONNRESET:
+ case -ENOTCONN:
case -EPIPE:
rpc_task_force_reencode(task);
}
@@ -1346,6 +1380,10 @@ call_timeout(struct rpc_task *task)
dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
task->tk_timeouts++;
+ if (RPC_IS_SOFTCONN(task)) {
+ rpc_exit(task, -ETIMEDOUT);
+ return;
+ }
if (RPC_IS_SOFT(task)) {
if (clnt->cl_chatty)
printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
@@ -1675,14 +1713,14 @@ static struct rpc_procinfo rpcproc_null = {
.p_decode = rpcproc_decode_null,
};
-static int rpc_ping(struct rpc_clnt *clnt, int flags)
+static int rpc_ping(struct rpc_clnt *clnt)
{
struct rpc_message msg = {
.rpc_proc = &rpcproc_null,
};
int err;
msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
- err = rpc_call_sync(clnt, &msg, flags);
+ err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
put_rpccred(msg.rpc_cred);
return err;
}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 830faf4d999..3e3772d8eb9 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -20,6 +20,7 @@
#include <linux/in6.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/mutex.h>
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
@@ -110,6 +111,9 @@ static void rpcb_getport_done(struct rpc_task *, void *);
static void rpcb_map_release(void *data);
static struct rpc_program rpcb_program;
+static struct rpc_clnt * rpcb_local_clnt;
+static struct rpc_clnt * rpcb_local_clnt4;
+
struct rpcbind_args {
struct rpc_xprt * r_xprt;
@@ -163,21 +167,60 @@ static const struct sockaddr_in rpcb_inaddr_loopback = {
.sin_port = htons(RPCBIND_PORT),
};
-static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr,
- size_t addrlen, u32 version)
+static DEFINE_MUTEX(rpcb_create_local_mutex);
+
+/*
+ * Returns zero on success, or a negative errno
+ * value on failure.
+ */
+static int rpcb_create_local(void)
{
struct rpc_create_args args = {
- .protocol = XPRT_TRANSPORT_UDP,
- .address = addr,
- .addrsize = addrlen,
+ .protocol = XPRT_TRANSPORT_TCP,
+ .address = (struct sockaddr *)&rpcb_inaddr_loopback,
+ .addrsize = sizeof(rpcb_inaddr_loopback),
.servername = "localhost",
.program = &rpcb_program,
- .version = version,
+ .version = RPCBVERS_2,
.authflavor = RPC_AUTH_UNIX,
.flags = RPC_CLNT_CREATE_NOPING,
};
+ struct rpc_clnt *clnt, *clnt4;
+ int result = 0;
+
+ if (rpcb_local_clnt)
+ return result;
+
+ mutex_lock(&rpcb_create_local_mutex);
+ if (rpcb_local_clnt)
+ goto out;
+
+ clnt = rpc_create(&args);
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create local rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+ result = -PTR_ERR(clnt);
+ goto out;
+ }
- return rpc_create(&args);
+ /*
+ * This results in an RPC ping. On systems running portmapper,
+ * the v4 ping will fail. Proceed anyway, but disallow rpcb
+ * v4 upcalls.
+ */
+ clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
+ if (IS_ERR(clnt4)) {
+ dprintk("RPC: failed to create local rpcbind v4 "
+ "cleint (errno %ld).\n", PTR_ERR(clnt4));
+ clnt4 = NULL;
+ }
+
+ rpcb_local_clnt = clnt;
+ rpcb_local_clnt4 = clnt4;
+
+out:
+ mutex_unlock(&rpcb_create_local_mutex);
+ return result;
}
static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
@@ -209,22 +252,13 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
return rpc_create(&args);
}
-static int rpcb_register_call(const u32 version, struct rpc_message *msg)
+static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
{
- struct sockaddr *addr = (struct sockaddr *)&rpcb_inaddr_loopback;
- size_t addrlen = sizeof(rpcb_inaddr_loopback);
- struct rpc_clnt *rpcb_clnt;
int result, error = 0;
msg->rpc_resp = &result;
- rpcb_clnt = rpcb_create_local(addr, addrlen, version);
- if (!IS_ERR(rpcb_clnt)) {
- error = rpc_call_sync(rpcb_clnt, msg, 0);
- rpc_shutdown_client(rpcb_clnt);
- } else
- error = PTR_ERR(rpcb_clnt);
-
+ error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
if (error < 0) {
dprintk("RPC: failed to contact local rpcbind "
"server (errno %d).\n", -error);
@@ -279,6 +313,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
struct rpc_message msg = {
.rpc_argp = &map,
};
+ int error;
+
+ error = rpcb_create_local();
+ if (error)
+ return error;
dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
"rpcbind\n", (port ? "" : "un"),
@@ -288,7 +327,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
if (port)
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
- return rpcb_register_call(RPCBVERS_2, &msg);
+ return rpcb_register_call(rpcb_local_clnt, &msg);
}
/*
@@ -313,7 +352,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
- result = rpcb_register_call(RPCBVERS_4, msg);
+ result = rpcb_register_call(rpcb_local_clnt4, msg);
kfree(map->r_addr);
return result;
}
@@ -340,7 +379,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
- result = rpcb_register_call(RPCBVERS_4, msg);
+ result = rpcb_register_call(rpcb_local_clnt4, msg);
kfree(map->r_addr);
return result;
}
@@ -356,7 +395,7 @@ static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
map->r_addr = "";
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- return rpcb_register_call(RPCBVERS_4, msg);
+ return rpcb_register_call(rpcb_local_clnt4, msg);
}
/**
@@ -414,6 +453,13 @@ int rpcb_v4_register(const u32 program, const u32 version,
struct rpc_message msg = {
.rpc_argp = &map,
};
+ int error;
+
+ error = rpcb_create_local();
+ if (error)
+ return error;
+ if (rpcb_local_clnt4 == NULL)
+ return -EPROTONOSUPPORT;
if (address == NULL)
return rpcb_unregister_all_protofamilies(&msg);
@@ -491,7 +537,7 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi
.rpc_message = &msg,
.callback_ops = &rpcb_getport_ops,
.callback_data = map,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_SOFTCONN,
};
return rpc_run_task(&task_setup_data);
@@ -1027,3 +1073,15 @@ static struct rpc_program rpcb_program = {
.version = rpcb_version,
.stats = &rpcb_stats,
};
+
+/**
+ * cleanup_rpcb_clnt - shut down the cached local rpcbind clients
+ *
+ */
+void cleanup_rpcb_clnt(void)
+{
+ if (rpcb_local_clnt4)
+ rpc_shutdown_client(rpcb_local_clnt4);
+ if (rpcb_local_clnt)
+ rpc_shutdown_client(rpcb_local_clnt);
+}
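
rpcb_create_local() above caches one local rpcbind client (plus an optional v4 client) for the life of the module instead of creating and tearing one down per upcall; the unlocked check followed by a re-check under rpcb_create_local_mutex keeps the common already-created path cheap while making concurrent first callers race-free. A minimal sketch of that lazy-initialisation shape (the client type and constructor are placeholders, and, as in the original, the unlocked fast-path read assumes a naturally atomic pointer store):

    #include <pthread.h>
    #include <stdlib.h>

    struct clnt { int dummy; };

    static struct clnt *local_clnt;
    static pthread_mutex_t create_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct clnt *make_clnt(void) { return calloc(1, sizeof(struct clnt)); }

    static int ensure_local_clnt(void)
    {
            int result = 0;

            if (local_clnt)                 /* fast path: already created */
                    return 0;

            pthread_mutex_lock(&create_lock);
            if (local_clnt)                 /* lost the race: someone else created it */
                    goto out;

            local_clnt = make_clnt();
            if (!local_clnt)
                    result = -1;            /* -ENOMEM */
    out:
            pthread_mutex_unlock(&create_lock);
            return result;
    }

    int main(void) { return ensure_local_clnt(); }
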
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cef74ba0666..aae6907fd54 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -210,6 +210,7 @@ void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qnam
{
__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
+EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
@@ -385,6 +386,20 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r
}
/*
+ * Tests whether rpc queue is empty
+ */
+int rpc_queue_empty(struct rpc_wait_queue *queue)
+{
+ int res;
+
+ spin_lock_bh(&queue->lock);
+ res = queue->qlen;
+ spin_unlock_bh(&queue->lock);
+ return (res == 0);
+}
+EXPORT_SYMBOL_GPL(rpc_queue_empty);
+
+/*
* Wake up a task on a specific queue
*/
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8cce9218901..f438347d817 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -24,6 +24,8 @@
extern struct cache_detail ip_map_cache, unix_gid_cache;
+extern void cleanup_rpcb_clnt(void);
+
static int __init
init_sunrpc(void)
{
@@ -53,6 +55,7 @@ out:
static void __exit
cleanup_sunrpc(void)
{
+ cleanup_rpcb_clnt();
rpcauth_remove_module();
cleanup_socket_xprt();
svc_cleanup_xprt_sock();
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b845e2293df..1c924ee0a1e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -16,8 +16,6 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
-#define SVC_MAX_WAKING 5
-
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
struct svc_pool *pool;
struct svc_rqst *rqstp;
int cpu;
- int thread_avail;
if (!(xprt->xpt_flags &
((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
spin_lock_bh(&pool->sp_lock);
+ if (!list_empty(&pool->sp_threads) &&
+ !list_empty(&pool->sp_sockets))
+ printk(KERN_ERR
+ "svc_xprt_enqueue: "
+ "threads and transports both waiting??\n");
+
if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
/* Don't enqueue dead transports */
dprintk("svc: transport %p is dead, not enqueued\n", xprt);
@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
}
process:
- /* Work out whether threads are available */
- thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */
- if (pool->sp_nwaking >= SVC_MAX_WAKING) {
- /* too many threads are runnable and trying to wake up */
- thread_avail = 0;
- pool->sp_stats.overloads_avoided++;
- }
-
- if (thread_avail) {
+ if (!list_empty(&pool->sp_threads)) {
rqstp = list_entry(pool->sp_threads.next,
struct svc_rqst,
rq_list);
@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
svc_xprt_get(xprt);
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
- rqstp->rq_waking = 1;
- pool->sp_nwaking++;
pool->sp_stats.threads_woken++;
BUG_ON(xprt->xpt_pool != pool);
wake_up(&rqstp->rq_wait);
@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
return -EINTR;
spin_lock_bh(&pool->sp_lock);
- if (rqstp->rq_waking) {
- rqstp->rq_waking = 0;
- pool->sp_nwaking--;
- BUG_ON(pool->sp_nwaking < 0);
- }
xprt = svc_xprt_dequeue(pool);
if (xprt) {
rqstp->rq_xprt = xprt;
@@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
struct svc_pool *pool = p;
if (p == SEQ_START_TOKEN) {
- seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
+ seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
return 0;
}
- seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
+ seq_printf(m, "%u %lu %lu %lu %lu\n",
pool->sp_id,
pool->sp_stats.packets,
pool->sp_stats.sockets_queued,
pool->sp_stats.threads_woken,
- pool->sp_stats.overloads_avoided,
pool->sp_stats.threads_timedout);
return 0;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 4a8f6558718..d8c04111449 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -655,23 +655,25 @@ static struct unix_gid *unix_gid_lookup(uid_t uid)
return NULL;
}
-static int unix_gid_find(uid_t uid, struct group_info **gip,
- struct svc_rqst *rqstp)
+static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
{
- struct unix_gid *ug = unix_gid_lookup(uid);
+ struct unix_gid *ug;
+ struct group_info *gi;
+ int ret;
+
+ ug = unix_gid_lookup(uid);
if (!ug)
- return -EAGAIN;
- switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) {
+ return ERR_PTR(-EAGAIN);
+ ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
+ switch (ret) {
case -ENOENT:
- *gip = NULL;
- return 0;
+ return ERR_PTR(-ENOENT);
case 0:
- *gip = ug->gi;
- get_group_info(*gip);
+ gi = get_group_info(ug->gi);
cache_put(&ug->h, &unix_gid_cache);
- return 0;
+ return gi;
default:
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
}
@@ -681,6 +683,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
struct sockaddr_in *sin;
struct sockaddr_in6 *sin6, sin6_storage;
struct ip_map *ipm;
+ struct group_info *gi;
+ struct svc_cred *cred = &rqstp->rq_cred;
switch (rqstp->rq_addr.ss_family) {
case AF_INET:
@@ -721,6 +725,17 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
ip_map_cached_put(rqstp, ipm);
break;
}
+
+ gi = unix_gid_find(cred->cr_uid, rqstp);
+ switch (PTR_ERR(gi)) {
+ case -EAGAIN:
+ return SVC_DROP;
+ case -ENOENT:
+ break;
+ default:
+ put_group_info(cred->cr_group_info);
+ cred->cr_group_info = gi;
+ }
return SVC_OK;
}
@@ -817,19 +832,11 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
slen = svc_getnl(argv); /* gids length */
if (slen > 16 || (len -= (slen + 2)*4) < 0)
goto badcred;
- if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp)
- == -EAGAIN)
+ cred->cr_group_info = groups_alloc(slen);
+ if (cred->cr_group_info == NULL)
return SVC_DROP;
- if (cred->cr_group_info == NULL) {
- cred->cr_group_info = groups_alloc(slen);
- if (cred->cr_group_info == NULL)
- return SVC_DROP;
- for (i = 0; i < slen; i++)
- GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
- } else {
- for (i = 0; i < slen ; i++)
- svc_getnl(argv);
- }
+ for (i = 0; i < slen; i++)
+ GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
*authp = rpc_autherr_badverf;
return SVC_DENIED;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index fd46d42afa8..469de292c23 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -700,6 +700,10 @@ void xprt_connect(struct rpc_task *task)
}
if (!xprt_lock_write(xprt, task))
return;
+
+ if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
+ xprt->ops->close(xprt);
+
if (xprt_connected(xprt))
xprt_release_write(xprt, task);
else {
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 04732d09013..3d739e5d15d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2019,7 +2019,7 @@ static void xs_connect(struct rpc_task *task)
if (xprt_test_and_set_connecting(xprt))
return;
- if (transport->sock != NULL) {
+ if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
dprintk("RPC: xs_connect delayed xprt %p for %lu "
"seconds\n",
xprt, xprt->reestablish_timeout / HZ);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index c01470e7de1..baa898add28 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -141,62 +141,35 @@ static const struct ieee80211_regdomain us_regdom = {
.reg_rules = {
/* IEEE 802.11b/g, channels 1..11 */
REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
- /* IEEE 802.11a, channel 36 */
- REG_RULE(5180-10, 5180+10, 40, 6, 23, 0),
- /* IEEE 802.11a, channel 40 */
- REG_RULE(5200-10, 5200+10, 40, 6, 23, 0),
- /* IEEE 802.11a, channel 44 */
- REG_RULE(5220-10, 5220+10, 40, 6, 23, 0),
+ /* IEEE 802.11a, channels 36..48 */
+ REG_RULE(5180-10, 5240+10, 40, 6, 17, 0),
/* IEEE 802.11a, channels 48..64 */
- REG_RULE(5240-10, 5320+10, 40, 6, 23, 0),
+ REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
+ /* IEEE 802.11a, channels 100..124 */
+ REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS),
+ /* IEEE 802.11a, channels 132..144 */
+ REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS),
/* IEEE 802.11a, channels 149..165, outdoor */
REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
}
};
static const struct ieee80211_regdomain jp_regdom = {
- .n_reg_rules = 3,
+ .n_reg_rules = 6,
.alpha2 = "JP",
.reg_rules = {
- /* IEEE 802.11b/g, channels 1..14 */
- REG_RULE(2412-10, 2484+10, 40, 6, 20, 0),
- /* IEEE 802.11a, channels 34..48 */
- REG_RULE(5170-10, 5240+10, 40, 6, 20,
- NL80211_RRF_PASSIVE_SCAN),
+ /* IEEE 802.11b/g, channels 1..11 */
+ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
+ /* IEEE 802.11b/g, channels 12..13 */
+ REG_RULE(2467-10, 2472+10, 20, 6, 20, 0),
+ /* IEEE 802.11b/g, channel 14 */
+ REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM),
+ /* IEEE 802.11a, channels 36..48 */
+ REG_RULE(5180-10, 5240+10, 40, 6, 20, 0),
/* IEEE 802.11a, channels 52..64 */
- REG_RULE(5260-10, 5320+10, 40, 6, 20,
- NL80211_RRF_NO_IBSS |
- NL80211_RRF_DFS),
- }
-};
-
-static const struct ieee80211_regdomain eu_regdom = {
- .n_reg_rules = 6,
- /*
- * This alpha2 is bogus, we leave it here just for stupid
- * backward compatibility
- */
- .alpha2 = "EU",
- .reg_rules = {
- /* IEEE 802.11b/g, channels 1..13 */
- REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
- /* IEEE 802.11a, channel 36 */
- REG_RULE(5180-10, 5180+10, 40, 6, 23,
- NL80211_RRF_PASSIVE_SCAN),
- /* IEEE 802.11a, channel 40 */
- REG_RULE(5200-10, 5200+10, 40, 6, 23,
- NL80211_RRF_PASSIVE_SCAN),
- /* IEEE 802.11a, channel 44 */
- REG_RULE(5220-10, 5220+10, 40, 6, 23,
- NL80211_RRF_PASSIVE_SCAN),
- /* IEEE 802.11a, channels 48..64 */
- REG_RULE(5240-10, 5320+10, 40, 6, 20,
- NL80211_RRF_NO_IBSS |
- NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 100..140 */
- REG_RULE(5500-10, 5700+10, 40, 6, 30,
- NL80211_RRF_NO_IBSS |
- NL80211_RRF_DFS),
+ REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
+ /* IEEE 802.11a, channels 100..144 */
+ REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS),
}
};
@@ -206,15 +179,17 @@ static const struct ieee80211_regdomain *static_regdom(char *alpha2)
return &us_regdom;
if (alpha2[0] == 'J' && alpha2[1] == 'P')
return &jp_regdom;
+ /* Use world roaming rules for "EU", since it was a pseudo
+ domain anyway... */
if (alpha2[0] == 'E' && alpha2[1] == 'U')
- return &eu_regdom;
- /* Default, as per the old rules */
- return &us_regdom;
+ return &world_regdom;
+ /* Default, world roaming rules */
+ return &world_regdom;
}
static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
{
- if (rd == &us_regdom || rd == &jp_regdom || rd == &eu_regdom)
+ if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom)
return true;
return false;
}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 584eb4826e0..54face3d442 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -479,6 +479,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
}
err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr);
}
+ wdev->wext.connect.privacy = false;
/*
* Applications using wireless extensions expect to be
* able to delete keys that don't exist, so allow that.