Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/bnep/core.c | 4
-rw-r--r--  net/bluetooth/hidp/core.c | 2
-rw-r--r--  net/bluetooth/l2cap.c | 5
-rw-r--r--  net/bridge/netfilter/ebtables.c | 6
-rw-r--r--  net/compat.c | 11
-rw-r--r--  net/core/dev.c | 21
-rw-r--r--  net/core/pktgen.c | 9
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/ipv4/devinet.c | 1
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_ECN.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 21
-rw-r--r--  net/ipv4/syncookies.c | 27
-rw-r--r--  net/ipv4/tcp_input.c | 24
-rw-r--r--  net/ipv4/tcp_ipv4.c | 21
-rw-r--r--  net/ipv4/tcp_minisocks.c | 10
-rw-r--r--  net/ipv4/tcp_output.c | 18
-rw-r--r--  net/ipv4/udp.c | 7
-rw-r--r--  net/ipv6/ip6_output.c | 3
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 19
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 7
-rw-r--r--  net/ipv6/reassembly.c | 8
-rw-r--r--  net/ipv6/route.c | 1
-rw-r--r--  net/ipv6/syncookies.c | 28
-rw-r--r--  net/ipv6/tcp_ipv6.c | 3
-rw-r--r--  net/irda/irlap.c | 14
-rw-r--r--  net/irda/irlap_event.c | 2
-rw-r--r--  net/irda/irlmp.c | 4
-rw-r--r--  net/key/af_key.c | 1
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 4
-rw-r--r--  net/netfilter/ipvs/Kconfig | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 18
-rw-r--r--  net/netfilter/ipvs/ip_vs_wrr.c | 15
-rw-r--r--  net/netfilter/nf_conntrack_ftp.c | 18
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 2
-rw-r--r--  net/packet/af_packet.c | 90
-rw-r--r--  net/rose/rose_loopback.c | 2
-rw-r--r--  net/sched/act_api.c | 2
-rw-r--r--  net/sctp/sm_sideeffect.c | 2
-rw-r--r--  net/sctp/sm_statefuns.c | 2
-rw-r--r--  net/sctp/socket.c | 3
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c | 2
-rw-r--r--  net/wimax/op-reset.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
47 files changed, 251 insertions, 210 deletions
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 29b1b220d6c..ef09c7b3a85 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -78,7 +78,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
static void __bnep_link_session(struct bnep_session *s)
{
/* It's safe to call __module_get() here because sessions are added
- by the socket layer which has to hold the refference to this module.
+ by the socket layer which has to hold the reference to this module.
*/
__module_get(THIS_MODULE);
list_add(&s->list, &bnep_session_list);
@@ -632,7 +632,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
s = __bnep_get_session(req->dst);
if (s) {
/* Wakeup user-space which is polling for socket errors.
- * This is temporary hack untill we have shutdown in L2CAP */
+ * This is temporary hack until we have shutdown in L2CAP */
s->sock->sk->sk_err = EUNATCH;
/* Kill session thread */
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 569750010fd..18e7f5a43dc 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -770,7 +770,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid = hid_allocate_device();
if (IS_ERR(hid))
- return PTR_ERR(session->hid);
+ return PTR_ERR(hid);
session->hid = hid;
session->req = req;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 5129b88c8e5..1120cf14a54 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1212,6 +1212,7 @@ static void l2cap_monitor_timeout(unsigned long arg)
bh_lock_sock(sk);
if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
+ bh_unlock_sock(sk);
return;
}
@@ -3435,8 +3436,8 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
(pi->unacked_frames > 0))
__mod_retrans_timer();
- l2cap_ertm_send(sk);
pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_send(sk);
}
break;
@@ -3471,9 +3472,9 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
if (rx_control & L2CAP_CTRL_POLL) {
- l2cap_retransmit_frame(sk, tx_seq);
pi->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(sk);
+ l2cap_retransmit_frame(sk, tx_seq);
l2cap_ertm_send(sk);
if (pi->conn_state & L2CAP_CONN_WAIT_F) {
pi->srej_save_reqseq = tx_seq;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index bd1c65425d4..0b7f262cd14 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1406,6 +1406,9 @@ static int do_ebt_set_ctl(struct sock *sk,
{
int ret;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
switch(cmd) {
case EBT_SO_SET_ENTRIES:
ret = do_replace(sock_net(sk), user, len);
@@ -1425,6 +1428,9 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
struct ebt_replace tmp;
struct ebt_table *t;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
diff --git a/net/compat.c b/net/compat.c
index e1a56ade803..a1fb1b079a8 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -754,26 +754,21 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
- struct timespec __user *timeout)
+ struct compat_timespec __user *timeout)
{
int datagrams;
struct timespec ktspec;
- struct compat_timespec __user *utspec;
if (timeout == NULL)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, NULL);
- utspec = (struct compat_timespec __user *)timeout;
- if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
- get_user(ktspec.tv_nsec, &utspec->tv_nsec))
+ if (get_compat_timespec(&ktspec, timeout))
return -EFAULT;
datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, &ktspec);
- if (datagrams > 0 &&
- (put_user(ktspec.tv_sec, &utspec->tv_sec) ||
- put_user(ktspec.tv_nsec, &utspec->tv_nsec)))
+ if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
datagrams = -EFAULT;
return datagrams;
diff --git a/net/core/dev.c b/net/core/dev.c
index c36a17aafcf..be9924f60ec 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4771,21 +4771,23 @@ static void net_set_todo(struct net_device *dev)
static void rollback_registered_many(struct list_head *head)
{
- struct net_device *dev;
+ struct net_device *dev, *tmp;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
- list_for_each_entry(dev, head, unreg_list) {
+ list_for_each_entry_safe(dev, tmp, head, unreg_list) {
/* Some devices call without registering
- * for initialization unwind.
+ * for initialization unwind. Remove those
+ * devices and proceed with the remaining.
*/
if (dev->reg_state == NETREG_UNINITIALIZED) {
pr_debug("unregister_netdevice: device %s/%p never "
"was registered\n", dev->name, dev);
WARN_ON(1);
- return;
+ list_del(&dev->unreg_list);
+ continue;
}
BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -5033,6 +5035,11 @@ int register_netdevice(struct net_device *dev)
rollback_registered(dev);
dev->reg_state = NETREG_UNREGISTERED;
}
+ /*
+ * Prevent userspace races by waiting until the network
+ * device is fully setup before sending notifications.
+ */
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
out:
return ret;
@@ -5595,6 +5602,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Notify protocols, that a new device appeared. */
call_netdevice_notifiers(NETDEV_REGISTER, dev);
+ /*
+ * Prevent userspace races by waiting until the network
+ * device is fully setup before sending notifications.
+ */
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+
synchronize_net();
err = 0;
out:
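Illustrative note, not part of the patch: the rollback_registered_many() hunk above depends on the delete-safe list walk idiom, where the next entry is remembered before the current one is unlinked. A minimal user-space sketch of that idiom on a hand-rolled singly linked list (not the kernel's <linux/list.h>; allocation failure checks omitted):

/* Illustrative sketch only (not kernel code): delete-safe iteration.
 * "tmp" plays the role of the extra cursor that list_for_each_entry_safe()
 * keeps, so an entry can be unlinked and freed without losing the walk. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *n, *tmp, **pp;
	int i;

	for (i = 3; i >= 1; i--) {		/* build 1 -> 2 -> 3 */
		n = malloc(sizeof(*n));
		n->value = i;
		n->next = head;
		head = n;
	}

	for (pp = &head, n = head; n; n = tmp) {
		tmp = n->next;			/* remember the next entry first */
		if (n->value == 2) {		/* unlink and free this entry */
			*pp = tmp;
			free(n);
			continue;		/* walk continues via tmp */
		}
		pp = &n->next;
	}

	for (n = head; n; n = n->next)		/* prints 1 and 3 */
		printf("%d\n", n->value);

	while (head) {				/* cleanup */
		n = head;
		head = head->next;
		free(n);
	}
	return 0;
}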
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a23b45f08ec..de0c2c72642 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -250,8 +250,7 @@ struct pktgen_dev {
__u64 count; /* Default No packets to send */
__u64 sofar; /* How many pkts we've sent so far */
__u64 tx_bytes; /* How many bytes we've transmitted */
- __u64 errors; /* Errors when trying to transmit,
- pkts will be re-sent */
+ __u64 errors; /* Errors when trying to transmit, */
/* runtime counters relating to clone_skb */
@@ -3465,6 +3464,12 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->seq_num++;
pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
break;
+ case NET_XMIT_DROP:
+ case NET_XMIT_CN:
+ case NET_XMIT_POLICED:
+ /* skb has been consumed */
+ pkt_dev->errors++;
+ break;
default: /* Drivers are not supposed to return other values! */
if (net_ratelimit())
pr_info("pktgen: %s xmit error: %d\n",
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33148a56819..794bcb897ff 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1364,15 +1364,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
case NETDEV_UNREGISTER:
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
break;
- case NETDEV_REGISTER:
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
- break;
case NETDEV_UP:
case NETDEV_DOWN:
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
break;
+ case NETDEV_POST_INIT:
+ case NETDEV_REGISTER:
case NETDEV_CHANGE:
case NETDEV_GOING_DOWN:
+ case NETDEV_UNREGISTER_BATCH:
break;
default:
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
diff --git a/net/core/sock.c b/net/core/sock.c
index 76ff58d43e2..e1f6f225f01 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1205,6 +1205,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
if (newsk->sk_prot->sockets_allocated)
percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+
+ if (sock_flag(newsk, SOCK_TIMESTAMP) ||
+ sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
+ net_enable_timestamp();
}
out:
return newsk;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5cdbc102a41..040c4f05b65 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1397,6 +1397,7 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
"accept_source_route"),
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
+ DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3323168ee52..82dbf711d6d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -252,6 +252,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
no_addr = in_dev->ifa_list == NULL;
rpf = IN_DEV_RPFILTER(in_dev);
accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
+ if (mark && !IN_DEV_SRC_VMARK(in_dev))
+ fl.mark = 0;
}
rcu_read_unlock();
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e34013a78ef..3451799e3db 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -254,7 +254,7 @@ int ip_mc_output(struct sk_buff *skb)
*/
if (rt->rt_flags&RTCF_MULTICAST) {
- if ((!sk || inet_sk(sk)->mc_loop)
+ if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
/* Small optimization: do not loopback not local frames,
which returned after forwarding; they will be dropped
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 549e206cdd4..ea5cea2415c 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -50,7 +50,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
struct tcphdr _tcph, *tcph;
__be16 oldval;
- /* Not enought header? */
+ /* Not enough header? */
tcph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
if (!tcph)
return false;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index fa2d6b6fc3e..331ead3ebd1 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -14,6 +14,7 @@
#include <net/route.h>
#include <net/ip.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
return err;
}
+static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge &&
+ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+ return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+ if (hooknum == NF_INET_PRE_ROUTING)
+ return IP_DEFRAG_CONNTRACK_IN;
+ else
+ return IP_DEFRAG_CONNTRACK_OUT;
+}
+
static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
#endif
/* Gather fragments. */
if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
- if (nf_ct_ipv4_gather_frags(skb,
- hooknum == NF_INET_PRE_ROUTING ?
- IP_DEFRAG_CONNTRACK_IN :
- IP_DEFRAG_CONNTRACK_OUT))
+ enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+ if (nf_ct_ipv4_gather_frags(skb, user))
return NF_STOLEN;
}
return NF_ACCEPT;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26399ad2a28..66fd80ef247 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+ /* check for timestamp cookie support */
+ memset(&tcp_opt, 0, sizeof(tcp_opt));
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+ if (tcp_opt.saw_tstamp)
+ cookie_check_timestamp(&tcp_opt);
+
ret = NULL;
req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
if (!req)
@@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
ireq->ecn_ok = 0;
+ ireq->snd_wscale = tcp_opt.snd_wscale;
+ ireq->rcv_wscale = tcp_opt.rcv_wscale;
+ ireq->sack_ok = tcp_opt.sack_ok;
+ ireq->wscale_ok = tcp_opt.wscale_ok;
+ ireq->tstamp_ok = tcp_opt.saw_tstamp;
+ req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
/* We throwed the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
}
}
- /* check for timestamp cookie support */
- memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
-
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
-
- ireq->snd_wscale = tcp_opt.snd_wscale;
- ireq->rcv_wscale = tcp_opt.rcv_wscale;
- ireq->sack_ok = tcp_opt.sack_ok;
- ireq->wscale_ok = tcp_opt.wscale_ok;
- ireq->tstamp_ok = tcp_opt.saw_tstamp;
- req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
/* Try to redo what tcp_v4_send_synack did. */
req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 12cab7d74db..28e02963249 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3727,7 +3727,7 @@ old_ack:
* the fast version below fails.
*/
void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
- u8 **hvpp, int estab, struct dst_entry *dst)
+ u8 **hvpp, int estab)
{
unsigned char *ptr;
struct tcphdr *th = tcp_hdr(skb);
@@ -3766,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
break;
case TCPOPT_WINDOW:
if (opsize == TCPOLEN_WINDOW && th->syn &&
- !estab && sysctl_tcp_window_scaling &&
- !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
+ !estab && sysctl_tcp_window_scaling) {
__u8 snd_wscale = *(__u8 *)ptr;
opt_rx->wscale_ok = 1;
if (snd_wscale > 14) {
@@ -3783,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
case TCPOPT_TIMESTAMP:
if ((opsize == TCPOLEN_TIMESTAMP) &&
((estab && opt_rx->tstamp_ok) ||
- (!estab && sysctl_tcp_timestamps &&
- !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
+ (!estab && sysctl_tcp_timestamps))) {
opt_rx->saw_tstamp = 1;
opt_rx->rcv_tsval = get_unaligned_be32(ptr);
opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3792,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
break;
case TCPOPT_SACK_PERM:
if (opsize == TCPOLEN_SACK_PERM && th->syn &&
- !estab && sysctl_tcp_sack &&
- !dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
+ !estab && sysctl_tcp_sack) {
opt_rx->sack_ok = 1;
tcp_sack_reset(opt_rx);
}
@@ -3878,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
if (tcp_parse_aligned_timestamp(tp, th))
return 1;
}
- tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
+ tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
return 1;
}
@@ -4133,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
- if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
- !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+ if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
int mib_idx;
if (before(seq, tp->rcv_nxt))
@@ -4165,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
tcp_enter_quickack_mode(sk);
- if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
- !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+ if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5428,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
u8 *hash_location;
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
int saved_clamp = tp->rx_opt.mss_clamp;
- tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst);
+ tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
if (th->ack) {
/* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 15e96030ce4..65b8ebfd078 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif
- ireq = inet_rsk(req);
- ireq->loc_addr = daddr;
- ireq->rmt_addr = saddr;
- ireq->no_srccheck = inet_sk(sk)->transparent;
- ireq->opt = tcp_v4_save_options(sk, skb);
-
- dst = inet_csk_route_req(sk, req);
- if(!dst)
- goto drop_and_free;
-
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
+ ireq = inet_rsk(req);
+ ireq->loc_addr = daddr;
+ ireq->rmt_addr = saddr;
+ ireq->no_srccheck = inet_sk(sk)->transparent;
+ ireq->opt = tcp_v4_save_options(sk, skb);
+
if (security_inet_conn_request(sk, skb, req))
- goto drop_and_release;
+ goto drop_and_free;
if (!want_cookie)
TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
*/
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
+ (dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) {
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 87accec8d09..f206ee5dda8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
int paws_reject = 0;
+ tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
- tmp_opt.tstamp_ok = 1;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
int paws_reject = 0;
- if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) {
- tmp_opt.tstamp_ok = 1;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+ tmp_opt.saw_tstamp = 0;
+ if (th->doff > (sizeof(struct tcphdr)>>2)) {
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 93316a96d82..383ce237640 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_md5sig_key **md5) {
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
- struct dst_entry *dst = __sk_dst_get(sk);
unsigned remaining = MAX_TCP_OPTION_SPACE;
u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
tcp_cookie_size_check(cvp->cookie_desired) :
@@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
opts->mss = tcp_advertise_mss(sk);
remaining -= TCPOLEN_MSS_ALIGNED;
- if (likely(sysctl_tcp_timestamps &&
- !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
- *md5 == NULL)) {
+ if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
opts->options |= OPTION_TS;
opts->tsval = TCP_SKB_CB(skb)->when;
opts->tsecr = tp->rx_opt.ts_recent;
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
- if (likely(sysctl_tcp_window_scaling &&
- !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
+ if (likely(sysctl_tcp_window_scaling)) {
opts->ws = tp->rx_opt.rcv_wscale;
opts->options |= OPTION_WSCALE;
remaining -= TCPOLEN_WSCALE_ALIGNED;
}
- if (likely(sysctl_tcp_sack &&
- !dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
+ if (likely(sysctl_tcp_sack)) {
opts->options |= OPTION_SACK_ADVERTISE;
if (unlikely(!(OPTION_TS & opts->options)))
remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk)
* See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
*/
tp->tcp_header_len = sizeof(struct tcphdr) +
- (sysctl_tcp_timestamps &&
- (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ?
- TCPOLEN_TSTAMP_ALIGNED : 0));
+ (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
#ifdef CONFIG_TCP_MD5SIG
if (tp->af_specific->md5_lookup(sk, sk) != NULL)
@@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk)
tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
&tp->rcv_wnd,
&tp->window_clamp,
- (sysctl_tcp_window_scaling &&
- !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)),
+ sysctl_tcp_window_scaling,
&rcv_wscale);
tp->rx_opt.rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f9534846ca..f0126fdd7e0 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
rand = (rand | 1) * (udptable->mask + 1);
- for (last = first + udptable->mask + 1;
- first != last;
- first++) {
+ last = first + udptable->mask + 1;
+ do {
hslot = udp_hashslot(udptable, net, first);
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
@@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
snum += rand;
} while (snum != first);
spin_unlock_bh(&hslot->lock);
- }
+ } while (++first != last);
goto fail;
} else {
hslot = udp_hashslot(udptable, net, snum);
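Illustrative note, not part of the patch: the rewritten loop above steps the candidate port by rand, which was forced to be an odd multiple of the slot count, so each pass stays within one hash slot and covers it completely. A stand-alone user-space sketch of the arithmetic, assuming a power-of-two slot count and a 16-bit port:

/* Illustrative sketch only (not kernel code): stepping by an odd multiple
 * of the slot count visits every port of a single hash slot exactly once
 * before snum wraps back to first. */
#include <stdio.h>

int main(void)
{
	unsigned int slots = 128;		/* udptable->mask + 1 */
	unsigned int rnd = 12345;		/* stand-in for net_random() */
	unsigned int step = (rnd | 1) * slots;	/* odd multiple of the slot count */
	unsigned short first = 4242;
	unsigned short snum = first;
	unsigned int visited = 0;

	do {
		visited++;		/* every value hit stays in the same slot */
		snum += step;		/* 16-bit wraparound, like a port number */
	} while (snum != first);

	/* Expect 65536 / slots distinct ports, i.e. the whole slot. */
	printf("visited %u of %u ports in the slot\n", visited, 65536 / slots);
	return 0;
}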
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd48801a8d6..eb6d0972863 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -121,10 +121,9 @@ static int ip6_output2(struct sk_buff *skb)
skb->dev = dev;
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
- struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
- if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
+ if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
((mroute6_socket(dev_net(dev)) &&
!(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 5f2ec208a8c..0956ebabbff 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -20,6 +20,7 @@
#include <net/ipv6.h>
#include <net/inet_frag.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
@@ -187,6 +188,21 @@ out:
return nf_conntrack_confirm(skb);
}
+static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge &&
+ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+ return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+ if (hooknum == NF_INET_PRE_ROUTING)
+ return IP6_DEFRAG_CONNTRACK_IN;
+ else
+ return IP6_DEFRAG_CONNTRACK_OUT;
+
+}
+
static unsigned int ipv6_defrag(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
@@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
if (skb->nfct)
return NF_ACCEPT;
- reasm = nf_ct_frag6_gather(skb);
-
+ reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
/* queued */
if (reasm == NULL)
return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e0b9424fa1b..312c20adc83 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -168,13 +168,14 @@ out:
/* Creation primitives. */
static __inline__ struct nf_ct_frag6_queue *
-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
+fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
unsigned int hash;
arg.id = id;
+ arg.user = user;
arg.src = src;
arg.dst = dst;
@@ -559,7 +560,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
return 0;
}
-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
{
struct sk_buff *clone;
struct net_device *dev = skb->dev;
@@ -605,7 +606,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
nf_ct_frag6_evictor();
- fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
+ fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
if (fq == NULL) {
pr_debug("Can't find and can't create new queue\n");
goto ret_orig;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4d98549a686..2cddea3bd6b 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -72,6 +72,7 @@ struct frag_queue
struct inet_frag_queue q;
__be32 id; /* fragment id */
+ u32 user;
struct in6_addr saddr;
struct in6_addr daddr;
@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
struct ip6_create_arg *arg = a;
fq = container_of(q, struct frag_queue, q);
- return (fq->id == arg->id &&
+ return (fq->id == arg->id && fq->user == arg->user &&
ipv6_addr_equal(&fq->saddr, arg->src) &&
ipv6_addr_equal(&fq->daddr, arg->dst));
}
@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
struct ip6_create_arg *arg = a;
fq->id = arg->id;
+ fq->user = arg->user;
ipv6_addr_copy(&fq->saddr, arg->src);
ipv6_addr_copy(&fq->daddr, arg->dst);
}
@@ -243,6 +245,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
unsigned int hash;
arg.id = id;
+ arg.user = IP6_DEFRAG_LOCAL_DELIVER;
arg.src = src;
arg.dst = dst;
@@ -705,7 +708,8 @@ static void ip6_frags_ns_sysctl_unregister(struct net *net)
table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
- kfree(table);
+ if (!net_eq(net, &init_net))
+ kfree(table);
}
static struct ctl_table_header *ip6_ctl_header;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index db3b2730389..c2bd74c5f8d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2630,6 +2630,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
+ table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
}
return table;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5b9af508b8f..7208a06576c 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+ /* check for timestamp cookie support */
+ memset(&tcp_opt, 0, sizeof(tcp_opt));
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+ if (tcp_opt.saw_tstamp)
+ cookie_check_timestamp(&tcp_opt);
+
ret = NULL;
req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
if (!req)
@@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
req->expires = 0UL;
req->retrans = 0;
ireq->ecn_ok = 0;
+ ireq->snd_wscale = tcp_opt.snd_wscale;
+ ireq->rcv_wscale = tcp_opt.rcv_wscale;
+ ireq->sack_ok = tcp_opt.sack_ok;
+ ireq->wscale_ok = tcp_opt.wscale_ok;
+ ireq->tstamp_ok = tcp_opt.saw_tstamp;
+ req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
@@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
goto out_free;
}
- /* check for timestamp cookie support */
- memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
-
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
-
- req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
- ireq->snd_wscale = tcp_opt.snd_wscale;
- ireq->rcv_wscale = tcp_opt.rcv_wscale;
- ireq->sack_ok = tcp_opt.sack_ok;
- ireq->wscale_ok = tcp_opt.wscale_ok;
- ireq->tstamp_ok = tcp_opt.saw_tstamp;
-
req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ee9cf62458d..febfd595a40 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
struct inet6_request_sock *treq;
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
@@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 356e65b1dc4..783c5f367d2 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -450,10 +450,10 @@ void irlap_disconnect_request(struct irlap_cb *self)
/* Check if we are in the right state for disconnecting */
switch (self->state) {
- case LAP_XMIT_P: /* FALLTROUGH */
- case LAP_XMIT_S: /* FALLTROUGH */
- case LAP_CONN: /* FALLTROUGH */
- case LAP_RESET_WAIT: /* FALLTROUGH */
+ case LAP_XMIT_P: /* FALLTHROUGH */
+ case LAP_XMIT_S: /* FALLTHROUGH */
+ case LAP_CONN: /* FALLTHROUGH */
+ case LAP_RESET_WAIT: /* FALLTHROUGH */
case LAP_RESET_CHECK:
irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
break;
@@ -485,9 +485,9 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
irlap_do_event(self, RESET_REQUEST, NULL, NULL);
break;
- case LAP_NO_RESPONSE: /* FALLTROUGH */
- case LAP_DISC_INDICATION: /* FALLTROUGH */
- case LAP_FOUND_NONE: /* FALLTROUGH */
+ case LAP_NO_RESPONSE: /* FALLTHROUGH */
+ case LAP_DISC_INDICATION: /* FALLTHROUGH */
+ case LAP_FOUND_NONE: /* FALLTHROUGH */
case LAP_MEDIA_BUSY:
irlmp_link_disconnect_indication(self->notify.instance, self,
reason, NULL);
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index c5c51959e3c..94a9884d714 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -1741,7 +1741,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
* Function irlap_state_xmit_s (event, skb, info)
*
* XMIT_S, The secondary station has been given the right to transmit,
- * and we therefor do not expect to receive any transmissions from other
+ * and we therefore do not expect to receive any transmissions from other
* stations.
*/
static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 7bf5b913828..0e7d8bde145 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -105,7 +105,7 @@ int __init irlmp_init(void)
init_timer(&irlmp->discovery_timer);
- /* Do discovery every 3 seconds, conditionaly */
+ /* Do discovery every 3 seconds, conditionally */
if (sysctl_discovery)
irlmp_start_discovery_timer(irlmp,
sysctl_discovery_timeout*HZ);
@@ -1842,7 +1842,7 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason)
reason = LM_CONNECT_FAILURE;
break;
default:
- IRDA_DEBUG(1, "%s(), Unknow IrLAP disconnect reason %d!\n",
+ IRDA_DEBUG(1, "%s(), Unknown IrLAP disconnect reason %d!\n",
__func__, lap_reason);
reason = LM_LAP_DISCONNECT;
break;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 84209fbbeb1..76fa6fef647 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1193,6 +1193,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
x->aalg->alg_key_len = key->sadb_key_bits;
memcpy(x->aalg->alg_key, key+1, keysize);
}
+ x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits;
x->props.aalgo = sa->sadb_sa_auth;
/* x->algo.flags = sa->sadb_sa_flags; */
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a8da23905c7..0192cfdacae 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -244,7 +244,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
* @addr: destination address of the path (ETH_ALEN length)
* @sdata: local subif
*
- * Returns: 0 on sucess
+ * Returns: 0 on success
*
* State: the initial state of the new path is set to 0
*/
@@ -532,7 +532,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
* @addr: dst address (ETH_ALEN length)
* @sdata: local subif
*
- * Returns: 0 if succesful
+ * Returns: 0 if successful
*/
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 79a69805221..f2d76238b9b 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -112,7 +112,8 @@ config IP_VS_RR
module, choose M here. If unsure, say N.
config IP_VS_WRR
- tristate "weighted round-robin scheduling"
+ tristate "weighted round-robin scheduling"
+ select GCD
---help---
The weighted robin-robin scheduling algorithm directs network
connections to different real servers based on server weights
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b95699f0054..847ffca4018 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1366,6 +1366,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
== sysctl_ip_vs_sync_threshold[0])) ||
((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
+ (cp->state == IP_VS_TCP_S_CLOSE) ||
(cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
(cp->state == IP_VS_TCP_S_TIME_WAIT)))))
ip_vs_sync_conn(cp);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e55a6861d26..c37ac2d7bec 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2077,6 +2077,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
+ return -EINVAL;
+ if (len < 0 || len > MAX_ARG_LEN)
+ return -EINVAL;
if (len != set_arglen[SET_CMDID(cmd)]) {
pr_err("set_ctl: len %u != %u\n",
len, set_arglen[SET_CMDID(cmd)]);
@@ -2352,17 +2356,25 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
+ unsigned int copylen;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
+ return -EINVAL;
+
if (*len < get_arglen[GET_CMDID(cmd)]) {
pr_err("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
- if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
+ copylen = get_arglen[GET_CMDID(cmd)];
+ if (copylen > 128)
+ return -EINVAL;
+
+ if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
if (mutex_lock_interruptible(&__ip_vs_mutex))
@@ -2714,6 +2726,8 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
return -EINVAL;
+ memset(usvc, 0, sizeof(*usvc));
+
usvc->af = nla_get_u16(nla_af);
#ifdef CONFIG_IP_VS_IPV6
if (usvc->af != AF_INET && usvc->af != AF_INET6)
@@ -2901,6 +2915,8 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
if (!(nla_addr && nla_port))
return -EINVAL;
+ memset(udest, 0, sizeof(*udest));
+
nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
udest->port = nla_get_u16(nla_port);
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 6182e8ea0be..3c115fc1978 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
+#include <linux/gcd.h>
#include <net/ip_vs.h>
@@ -38,20 +39,6 @@ struct ip_vs_wrr_mark {
};
-/*
- * Get the gcd of server weights
- */
-static int gcd(int a, int b)
-{
- int c;
-
- while ((c = a % b)) {
- a = b;
- b = c;
- }
- return b;
-}
-
static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
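Illustrative note, not part of the patch: the helper removed above is the classic Euclidean remainder loop, now replaced by the shared gcd() from <linux/gcd.h>. A stand-alone user-space equivalent with a few example weights:

/* Illustrative sketch only (not kernel code): the Euclidean loop the
 * deleted helper used, applied to example real-server weights. */
#include <stdio.h>

static int euclid_gcd(int a, int b)
{
	int c;

	while ((c = a % b)) {	/* same remainder loop as the removed helper */
		a = b;
		b = c;
	}
	return b;
}

int main(void)
{
	/* e.g. weights 4, 6 and 8 share a gcd of 2, so the weighted
	 * round-robin state can step weights down in units of 2 */
	printf("%d\n", euclid_gcd(euclid_gcd(4, 6), 8));
	return 0;
}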
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 38ea7ef3ccd..f0732aa18e4 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -323,24 +323,24 @@ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq,
struct nf_ct_ftp_master *info, int dir,
struct sk_buff *skb)
{
- unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
+ unsigned int i, oldest;
/* Look for oldest: if we find exact match, we're done. */
for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
if (info->seq_aft_nl[dir][i] == nl_seq)
return;
-
- if (oldest == info->seq_aft_nl_num[dir] ||
- before(info->seq_aft_nl[dir][i],
- info->seq_aft_nl[dir][oldest]))
- oldest = i;
}
if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
- } else if (oldest != NUM_SEQ_TO_REMEMBER &&
- after(nl_seq, info->seq_aft_nl[dir][oldest])) {
- info->seq_aft_nl[dir][oldest] = nl_seq;
+ } else {
+ if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1]))
+ oldest = 0;
+ else
+ oldest = 1;
+
+ if (after(nl_seq, info->seq_aft_nl[dir][oldest]))
+ info->seq_aft_nl[dir][oldest] = nl_seq;
}
}
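Illustrative note, not part of the patch: a user-space sketch of the two-slot bookkeeping that the rewritten update_nl_seq() performs, assuming NUM_SEQ_TO_REMEMBER is 2; the per-direction arrays are dropped for brevity and plain comparisons stand in for the kernel's wraparound-safe before()/after():

/* Illustrative sketch only (not kernel code). */
#include <stdio.h>

#define NUM_SEQ_TO_REMEMBER 2

struct ftp_master {
	unsigned int seq_aft_nl[NUM_SEQ_TO_REMEMBER];
	unsigned int seq_aft_nl_num;
};

static void remember_seq(struct ftp_master *info, unsigned int nl_seq)
{
	unsigned int i, oldest;

	for (i = 0; i < info->seq_aft_nl_num; i++)
		if (info->seq_aft_nl[i] == nl_seq)
			return;			/* already remembered */

	if (info->seq_aft_nl_num < NUM_SEQ_TO_REMEMBER) {
		info->seq_aft_nl[info->seq_aft_nl_num++] = nl_seq;
	} else {
		/* both slots used: replace the older entry, and only
		 * if the new sequence number is newer than it */
		oldest = (info->seq_aft_nl[0] < info->seq_aft_nl[1]) ? 0 : 1;
		if (nl_seq > info->seq_aft_nl[oldest])
			info->seq_aft_nl[oldest] = nl_seq;
	}
}

int main(void)
{
	struct ftp_master info = { { 0, 0 }, 0 };

	remember_seq(&info, 100);
	remember_seq(&info, 200);
	remember_seq(&info, 150);	/* replaces 100, the older of the two */
	printf("%u %u\n", info.seq_aft_nl[0], info.seq_aft_nl[1]);	/* 150 200 */
	return 0;
}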
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 7a10bbe02c1..c5d9f97ef21 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -682,7 +682,7 @@ struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
* buckets and @skip_chain entries. For each entry in the table call
* @callback, if @callback returns a negative value stop 'walking' through the
* table and return. Updates the values in @skip_bkt and @skip_chain on
- * return. Returns zero on succcess, negative values on failure.
+ * return. Returns zero on success, negative values on failure.
*
*/
int netlbl_domhsh_walk(u32 *skip_bkt,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 020562164b5..f126d18dbdc 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -415,7 +415,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct net_device *dev;
__be16 proto = 0;
int err;
@@ -437,6 +437,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
*/
saddr->spkt_device[13] = 0;
+retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
err = -ENODEV;
@@ -456,58 +457,48 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
if (len > dev->mtu + dev->hard_header_len)
goto out_unlock;
- err = -ENOBUFS;
- skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
-
- /*
- * If the write buffer is full, then tough. At this level the user
- * gets to deal with the problem - do your own algorithmic backoffs.
- * That's far more flexible.
- */
-
- if (skb == NULL)
- goto out_unlock;
-
- /*
- * Fill it in
- */
-
- /* FIXME: Save some space for broken drivers that write a
- * hard header at transmission time by themselves. PPP is the
- * notable one here. This should really be fixed at the driver level.
- */
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
- skb_reset_network_header(skb);
-
- /* Try to align data part correctly */
- if (dev->header_ops) {
- skb->data -= dev->hard_header_len;
- skb->tail -= dev->hard_header_len;
- if (len < dev->hard_header_len)
- skb_reset_network_header(skb);
+ if (!skb) {
+ size_t reserved = LL_RESERVED_SPACE(dev);
+ unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
+
+ rcu_read_unlock();
+ skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
+ if (skb == NULL)
+ return -ENOBUFS;
+ /* FIXME: Save some space for broken drivers that write a hard
+ * header at transmission time by themselves. PPP is the notable
+ * one here. This should really be fixed at the driver level.
+ */
+ skb_reserve(skb, reserved);
+ skb_reset_network_header(skb);
+
+ /* Try to align data part correctly */
+ if (hhlen) {
+ skb->data -= hhlen;
+ skb->tail -= hhlen;
+ if (len < hhlen)
+ skb_reset_network_header(skb);
+ }
+ err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ if (err)
+ goto out_free;
+ goto retry;
}
- /* Returns -EFAULT on error */
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- if (err)
- goto out_free;
-
- /*
- * Now send it
- */
dev_queue_xmit(skb);
rcu_read_unlock();
return len;
-out_free:
- kfree_skb(skb);
out_unlock:
rcu_read_unlock();
+out_free:
+ kfree_skb(skb);
return err;
}
@@ -1030,8 +1021,20 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
status = TP_STATUS_SEND_REQUEST;
err = dev_queue_xmit(skb);
- if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
- goto out_xmit;
+ if (unlikely(err > 0)) {
+ err = net_xmit_errno(err);
+ if (err && __packet_get_status(po, ph) ==
+ TP_STATUS_AVAILABLE) {
+ /* skb was destructed already */
+ skb = NULL;
+ goto out_status;
+ }
+ /*
+ * skb was dropped but not destructed yet;
+ * let's treat it like congestion or err < 0
+ */
+ err = 0;
+ }
packet_increment_head(&po->tx_ring);
len_sum += tp_len;
} while (likely((ph != NULL) ||
@@ -1042,9 +1045,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
err = len_sum;
goto out_put;
-out_xmit:
- skb->destructor = sock_wfree;
- atomic_dec(&po->tx_ring.pending);
out_status:
__packet_set_status(po, ph, status);
kfree_skb(skb);
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 114df6eec8c..968e8bac1b5 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -75,7 +75,7 @@ static void rose_loopback_timer(unsigned long param)
lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
frametype = skb->data[2];
dest = (rose_address *)(skb->data + 4);
- lci_o = 0xFFF - lci_i;
+ lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i;
skb_reset_transport_header(skb);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 2a740035aa6..64f5e328cee 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -598,7 +598,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
goto errout;
/* compat_mode being true specifies a call that is supposed
- * to add additional backward compatiblity statistic TLVs.
+ * to add additional backward compatibility statistic TLVs.
*/
if (compat_mode) {
if (a->type == TCA_OLD_COMPAT)
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d771cc1b777..4e4ca65cd32 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -717,7 +717,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
if (sctp_style(sk, TCP)) {
/* Change the sk->sk_state of a TCP-style socket that has
- * sucessfully completed a connect() call.
+ * successfully completed a connect() call.
*/
if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
sk->sk_state = SCTP_SS_ESTABLISHED;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 1ef9de9bbae..47bc20d3a85 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3577,7 +3577,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
* To do this properly, we'll set the destination address of the chunk
* and at the transmit time, will try look up the transport to use.
* Since ASCONFs may be bundled, the correct transport may not be
- * created untill we process the entire packet, thus this workaround.
+ * created until we process the entire packet, thus this workaround.
*/
asconf_ack->dest = chunk->source;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 89ab66e5474..67fdac9d2d3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2087,8 +2087,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
/* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
- if (sp->autoclose > (MAX_SCHEDULE_TIMEOUT / HZ) )
- sp->autoclose = (__u32)(MAX_SCHEDULE_TIMEOUT / HZ) ;
+ sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
return 0;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index f11be72a1a8..b15e1ebb2bf 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -54,7 +54,7 @@
* Assumptions:
* - head[0] is physically contiguous.
* - tail[0] is physically contiguous.
- * - pages[] is not physically or virtually contigous and consists of
+ * - pages[] is not physically or virtually contiguous and consists of
* PAGE_SIZE elements.
*
* Output:
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index ca269178c4d..35f370091f4 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -62,7 +62,7 @@
* Called when wanting to reset the device for any reason. Device is
* taken back to power on status.
*
- * This call blocks; on succesful return, the device has completed the
+ * This call blocks; on successful return, the device has completed the
* reset process and is ready to operate.
*/
int wimax_reset(struct wimax_dev *wimax_dev)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cb81ca35b0d..4725a549ad4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1445,7 +1445,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (!dev)
goto free_dst;
- /* Copy neighbout for reachability confirmation */
+ /* Copy neighbour for reachability confirmation */
dst0->neighbour = neigh_clone(dst->neighbour);
xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);