Diffstat (limited to 'net')
32 files changed, 117 insertions, 86 deletions
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index db9f5b39388..813e399220a 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("GPL");
 /**************** DCB attribute policies *************************************/

 /* DCB netlink attributes policy */
-static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
 [DCB_ATTR_STATE] = {.type = NLA_U8},
 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
@@ -68,7 +68,7 @@ static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
 };

 /* DCB priority flow control to User Priority nested attributes */
-static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
@@ -81,7 +81,7 @@ static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
 };

 /* DCB priority grouping nested attributes */
-static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
@@ -103,7 +103,7 @@ static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
 };

 /* DCB traffic class nested attributes. */
-static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
+static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
@@ -112,7 +112,7 @@ static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
 };

 /* DCB capabilities nested attributes. */
-static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
 [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
@@ -124,14 +124,14 @@ static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
 };

 /* DCB capabilities nested attributes. */
-static struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
 };

 /* DCB BCN nested attributes. */
-static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
@@ -160,7 +160,7 @@ static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
 };

 /* DCB APP nested attributes. */
-static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a2a5983dbf0..c0c5274d027 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -793,7 +793,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 }

 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 ip_rt_put(rt);
 goto tx_error;
 }
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7e3712ce399..c1bc074f61b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -576,6 +576,20 @@ static struct ctl_table ipv4_table[] = {
 .proc_handler = proc_dointvec
 },
 {
+ .procname = "tcp_thin_linear_timeouts",
+ .data = &sysctl_tcp_thin_linear_timeouts,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "tcp_thin_dupack",
+ .data = &sysctl_tcp_thin_dupack,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
 .procname = "udp_mem",
 .data = &sysctl_udp_mem,
 .maxlen = sizeof(sysctl_udp_mem),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e471d037fcc..5901010fad5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2229,6 +2229,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 }
 break;

+ case TCP_THIN_LINEAR_TIMEOUTS:
+ if (val < 0 || val > 1)
+ err = -EINVAL;
+ else
+ tp->thin_lto = val;
+ break;
+
+ case TCP_THIN_DUPACK:
+ if (val < 0 || val > 1)
+ err = -EINVAL;
+ else
+ tp->thin_dupack = val;
+ break;
+
 case TCP_CORK:
 /* When set indicates to always queue non-full frames.
 * Later the user clears this option and we transmit
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3fddc69cccc..788851ca8c5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,6 +89,8 @@ int sysctl_tcp_frto __read_mostly = 2;
 int sysctl_tcp_frto_response __read_mostly;
 int sysctl_tcp_nometrics_save __read_mostly;

+int sysctl_tcp_thin_dupack __read_mostly;
+
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_abc __read_mostly;

@@ -2447,6 +2449,16 @@ static int tcp_time_to_recover(struct sock *sk)
 return 1;
 }

+ /* If a thin stream is detected, retransmit after first
+ * received dupack. Employ only if SACK is supported in order
+ * to avoid possible corner-case series of spurious retransmissions
+ * Use only if there are no unsent data.
+ */
+ if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
+ tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
+ tcp_is_sack(tp) && !tcp_send_head(sk))
+ return 1;
+
 return 0;
 }

diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index de7d1bf9114..a17629b8912 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -29,6 +29,7 @@ int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
 int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
 int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
 int sysctl_tcp_orphan_retries __read_mostly;
+int sysctl_tcp_thin_linear_timeouts __read_mostly;

 static void tcp_write_timer(unsigned long);
 static void tcp_delack_timer(unsigned long);
@@ -415,7 +416,25 @@ void tcp_retransmit_timer(struct sock *sk)
 icsk->icsk_retransmits++;

 out_reset_timer:
- icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+ /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
+ * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
+ * might be increased if the stream oscillates between thin and thick,
+ * thus the old value might already be too high compared to the value
+ * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
+ * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
+ * exponential backoff behaviour to avoid continue hammering
+ * linear-timeout retransmissions into a black hole
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
+ tcp_stream_is_thin(tp) &&
+ icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
+ icsk->icsk_backoff = 0;
+ icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
+ } else {
+ /* Use normal (exponential) backoff */
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+ }
 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
 __sk_dst_reset(sk);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 4bac362b133..074f2c084f9 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -481,7 +481,7 @@ looped_back:
 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
 IPSTATS_MIB_INHDRERRORS);
 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
- 0, skb->dev);
+ 0);
 kfree_skb(skb);
 return -1;
 }
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 217dbc2e28d..eb9abe24bdf 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -67,11 +67,6 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>

-DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
-EXPORT_SYMBOL(icmpv6_statistics);
-DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
-EXPORT_SYMBOL(icmpv6msg_statistics);
-
 /*
 * The ICMP socket(s). This is the most convenient way to flow control
 * our ICMP output as well as maintain a clean interface throughout
@@ -119,7 +114,7 @@ static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
 */
 void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
 {
- icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
+ icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
 kfree_skb(skb);
 }

@@ -305,8 +300,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
 /*
 * Send an ICMP message in response to a packet in error
 */
-void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- struct net_device *dev)
+void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 {
 struct net *net = dev_net(skb->dev);
 struct inet6_dev *idev = NULL;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 77e122f53ea..2f9847924fa 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -93,29 +93,20 @@ static __u32 rt_sernum;

 static void fib6_gc_timer_cb(unsigned long arg);

-static struct fib6_walker_t fib6_walker_list = {
- .prev = &fib6_walker_list,
- .next = &fib6_walker_list,
-};
-
-#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
+static LIST_HEAD(fib6_walkers);
+#define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh)

 static inline void fib6_walker_link(struct fib6_walker_t *w)
 {
 write_lock_bh(&fib6_walker_lock);
- w->next = fib6_walker_list.next;
- w->prev = &fib6_walker_list;
- w->next->prev = w;
- w->prev->next = w;
+ list_add(&w->lh, &fib6_walkers);
 write_unlock_bh(&fib6_walker_lock);
 }

 static inline void fib6_walker_unlink(struct fib6_walker_t *w)
 {
 write_lock_bh(&fib6_walker_lock);
- w->next->prev = w->prev;
- w->prev->next = w->next;
- w->prev = w->next = w;
+ list_del(&w->lh);
 write_unlock_bh(&fib6_walker_lock);
 }
 static __inline__ u32 fib6_new_sernum(void)
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 237e2dba6e9..e28f9203dec 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -216,8 +216,7 @@ resubmit:
 IP6_INC_STATS_BH(net, idev,
 IPSTATS_MIB_INUNKNOWNPROTOS);
 icmpv6_send(skb, ICMPV6_PARAMPROB,
- ICMPV6_UNK_NEXTHDR, nhoff,
- skb->dev);
+ ICMPV6_UNK_NEXTHDR, nhoff);
 }
 } else
 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index eb6d0972863..1a5fe9ad194 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -267,7 +267,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 if (net_ratelimit())
 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
 skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
 kfree_skb(skb);
 return -EMSGSIZE;
@@ -441,8 +441,7 @@ int ip6_forward(struct sk_buff *skb)
 if (hdr->hop_limit <= 1) {
 /* Force OUTPUT device used as source address */
 skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
- 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
 IP6_INC_STATS_BH(net,
 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

@@ -504,7 +503,7 @@ int ip6_forward(struct sk_buff *skb)
 goto error;
 if (addrtype & IPV6_ADDR_LINKLOCAL) {
 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
- ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
+ ICMPV6_NOT_NEIGHBOUR, 0);
 goto error;
 }
 }
@@ -512,7 +511,7 @@ int ip6_forward(struct sk_buff *skb)
 if (skb->len > dst_mtu(dst)) {
 /* Again, force OUTPUT device used as source address */
 skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst));
 IP6_INC_STATS_BH(net,
 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
 IP6_INC_STATS_BH(net,
@@ -627,7 +626,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 */
 if (!skb->local_df) {
 skb->dev = skb_dst(skb)->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 IPSTATS_MIB_FRAGFAILS);
 kfree_skb(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9b02492d870..138980eec21 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -622,7 +622,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 if (rt && rt->rt6i_dev)
 skb2->dev = rt->rt6i_dev;

- icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);
+ icmpv6_send(skb2, rel_type, rel_code, rel_info);

 if (rt)
 dst_release(&rt->u.dst);
@@ -1014,7 +1014,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
 if (tel->encap_limit == 0) {
 icmpv6_send(skb, ICMPV6_PARAMPROB,
- ICMPV6_HDR_FIELD, offset + 2, skb->dev);
+ ICMPV6_HDR_FIELD, offset + 2);
 return -1;
 }
 encap_limit = tel->encap_limit - 1;
@@ -1033,7 +1033,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
 if (err != 0) {
 if (err == -EMSGSIZE)
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 return -1;
 }

diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index f797e8c6f3b..2794b600283 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -56,7 +56,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)

 static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
 {
- icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
+ icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
 }

 static int mip6_mh_len(int type)
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 8311ca31816..dd8afbaf00a 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -169,7 +169,7 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
 if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
 skb_in->dev = net->loopback_dev;

- icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL);
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
 }

 static unsigned int
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index b2847ed6a7d..a555156e977 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -228,7 +228,7 @@ static void ip6_frag_expire(unsigned long data)
 pointer directly, device might already disappeared.
 */
 fq->q.fragments->dev = dev;
- icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
+ icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
 out_rcu_unlock:
 rcu_read_unlock();
 out:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8500156f263..88c0a5c49ae 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -909,7 +909,7 @@ static void ip6_link_failure(struct sk_buff *skb)
 {
 struct rt6_info *rt;

- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

 rt = (struct rt6_info *) skb_dst(skb);
 if (rt) {
@@ -1884,7 +1884,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
 ipstats_mib_noroutes);
 break;
 }
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
 kfree_skb(skb);
 return 0;
 }
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 96eb2d4641c..b1eea811be4 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -743,7 +743,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

 if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 ip_rt_put(rt);
 goto tx_error;
 }
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 51e2832d13a..e17bc1dfc1a 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -98,7 +98,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
 if (!handler->handler(skb))
 return 0;

- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

 drop:
 kfree_skb(skb);
@@ -116,7 +116,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
 if (!handler->handler(skb))
 return 0;

- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

 drop:
 kfree_skb(skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index a7af9d68cd6..52b8347ae3b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -680,12 +680,11 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 int proto)
 {
+ struct net *net = dev_net(skb->dev);
 struct sock *sk;
 struct udphdr *uh;
- struct net_device *dev = skb->dev;
 struct in6_addr *saddr, *daddr;
 u32 ulen = 0;
- struct net *net = dev_net(skb->dev);

 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 goto short_packet;
@@ -744,7 +743,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
 proto == IPPROTO_UDPLITE);

- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

 kfree_skb(skb);
 return 0;
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index c4f4eef032a..0c92112dcba 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -38,7 +38,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)

 if (!skb->local_df && skb->len > mtu) {
 skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 ret = -EMSGSIZE;
 }

diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 476b307bd80..69b5b75f543 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -124,7 +124,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
 return ret;
 }

-static struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
+static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
 [IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING,
 .len = IFNAMSIZ-1 },
 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 72e96d823eb..44590887a92 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -515,8 +515,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
 */
 #ifdef CONFIG_IP_VS_IPV6
 if (svc->af == AF_INET6)
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0,
- skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 else
 #endif
 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -1048,7 +1047,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
 icmpv6_send(skb,
 ICMPV6_DEST_UNREACH,
 ICMPV6_PORT_UNREACH,
- 0, skb->dev);
+ 0);
 else
 #endif
 icmp_send(skb,
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 30b3189bd29..223b5018c7d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -311,7 +311,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 mtu = dst_mtu(&rt->u.dst);
 if (skb->len > mtu) {
 dst_release(&rt->u.dst);
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 goto tx_error;
 }
@@ -454,7 +454,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 mtu = dst_mtu(&rt->u.dst);
 if (skb->len > mtu) {
 dst_release(&rt->u.dst);
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
 "ip_vs_nat_xmit_v6(): frag needed for");
 goto tx_error;
@@ -672,7 +672,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 dst_release(&rt->u.dst);
 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 goto tx_error;
@@ -814,7 +814,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 /* MTU checking */
 mtu = dst_mtu(&rt->u.dst);
 if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 dst_release(&rt->u.dst);
 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 goto tx_error;
@@ -965,7 +965,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 mtu = dst_mtu(&rt->u.dst);
 if (skb->len > mtu) {
 dst_release(&rt->u.dst);
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 goto tx_error;
 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9bc9b92bc09..3d9122e78f4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 /*
 * SMP locking strategy:
 * hash table is protected with spinlock unix_table_lock
- * each socket state is protected by separate rwlock.
+ * each socket state is protected by separate spin lock.
 */

 static inline unsigned unix_hash_fold(__wsum n)
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d3bfb6ef13a..7718657e93d 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -320,8 +320,7 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,

 EXPORT_SYMBOL_GPL(wimax_msg);

-static const
-struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
 [WIMAX_GNL_MSG_IFIDX] = {
 .type = NLA_U32,
 },
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 35f370091f4..4dc82a54ba3 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -91,8 +91,7 @@ int wimax_reset(struct wimax_dev *wimax_dev)

 EXPORT_SYMBOL(wimax_reset);

-static const
-struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
 [WIMAX_GNL_RESET_IFIDX] = {
 .type = NLA_U32,
 },
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index ae752a64d92..e978c7136c9 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -410,8 +410,7 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
 * just query).
 */

-static const
-struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
 [WIMAX_GNL_RFKILL_IFIDX] = {
 .type = NLA_U32,
 },
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index a76b8fcb056..11ad3356eb5 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -33,8 +33,7 @@

 #include "debug-levels.h"

-static const
-struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
 [WIMAX_GNL_STGET_IFIDX] = {
 .type = NLA_U32,
 },
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index c8866412f83..813e1eaea29 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -75,8 +75,7 @@ MODULE_PARM_DESC(debug,
 * close to where the data is generated.
 */
 /*
-static const
-struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
 [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
 [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
 };
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5b79ecf17be..a001ea32cb7 10064
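For context on the net/ipv4 hunks above (sysctl_net_ipv4.c, tcp.c, tcp_input.c, tcp_timer.c): they add thin-stream handling that can be enabled system-wide via the new sysctls or per socket via setsockopt(). The sketch below is not part of this diff; it is a minimal userspace illustration, assuming the option values exported by the kernel headers of this series (16 and 17) and ordinary BSD socket calls.

/* Minimal userspace sketch (assumption, not from this diff): enable the
 * thin-stream behaviour added above on a single TCP socket. The fallback
 * #defines mirror the values this series adds to the kernel's linux/tcp.h.
 * System-wide equivalents are the sysctls registered in
 * net/ipv4/sysctl_net_ipv4.c: net.ipv4.tcp_thin_linear_timeouts and
 * net.ipv4.tcp_thin_dupack.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_THIN_LINEAR_TIMEOUTS
#define TCP_THIN_LINEAR_TIMEOUTS 16
#endif
#ifndef TCP_THIN_DUPACK
#define TCP_THIN_DUPACK 17
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Linear retransmission timeouts instead of exponential backoff
	 * while the stream stays thin (see the tcp_timer.c hunk). */
	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
		       &one, sizeof(one)) < 0)
		perror("TCP_THIN_LINEAR_TIMEOUTS");

	/* Retransmit after the first duplicate ACK on thin, SACK-enabled
	 * streams (see the tcp_input.c hunk). */
	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_DUPACK,
		       &one, sizeof(one)) < 0)
		perror("TCP_THIN_DUPACK");

	return 0;
}

The kernel-side checks in do_tcp_setsockopt() above accept only 0 or 1 for both options, so passing any other value returns EINVAL.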
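The net/ipv6/ip6_fib.c hunk replaces an open-coded prev/next walker ring with the kernel's generic list primitives (LIST_HEAD, list_add, list_del, list_for_each_entry). The standalone sketch below re-implements just enough of that pattern in plain C to show what the converted code does; the struct walker type, field names, and the minimal list macros are stand-ins for illustration, not kernel code.

/* Illustrative userspace sketch of the list_head pattern adopted by the
 * ip6_fib.c hunk above. Compile with gcc (uses typeof). */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

/* Stand-in for struct fib6_walker_t: it embeds a list_head ("lh" in the
 * hunk above) instead of carrying its own prev/next pointers. */
struct walker {
	struct list_head lh;
	int id;
};

static LIST_HEAD(walkers);

int main(void)
{
	struct walker a = { .id = 1 }, b = { .id = 2 };
	struct walker *w;

	list_add(&a.lh, &walkers);		/* like fib6_walker_link() */
	list_add(&b.lh, &walkers);

	list_for_each_entry(w, &walkers, lh)	/* like FOR_WALKERS(w) */
		printf("walker %d\n", w->id);

	list_del(&a.lh);			/* like fib6_walker_unlink() */
	return 0;
}

The payoff in the real patch is that the hand-rolled pointer surgery in fib6_walker_link()/fib6_walker_unlink() collapses to a single list_add()/list_del() call, with the same write_lock_bh() protection kept around it.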