Diffstat (limited to 'net')
96 files changed, 767 insertions, 599 deletions
diff --git a/net/802/garp.c b/net/802/garp.c index 8e21b6db398..a5c22483043 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -167,7 +167,8 @@ static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app, return NULL; } -static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new) +static struct garp_attr *garp_attr_create(struct garp_applicant *app, + const void *data, u8 len, u8 type) { struct rb_node *parent = NULL, **p = &app->gid.rb_node; struct garp_attr *attr; @@ -176,21 +177,16 @@ static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new) while (*p) { parent = *p; attr = rb_entry(parent, struct garp_attr, node); - d = garp_attr_cmp(attr, new->data, new->dlen, new->type); + d = garp_attr_cmp(attr, data, len, type); if (d < 0) p = &parent->rb_left; else if (d > 0) p = &parent->rb_right; + else { + /* The attribute already exists; re-use it. */ + return attr; + } } - rb_link_node(&new->node, parent, p); - rb_insert_color(&new->node, &app->gid); -} - -static struct garp_attr *garp_attr_create(struct garp_applicant *app, - const void *data, u8 len, u8 type) -{ - struct garp_attr *attr; - attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC); if (!attr) return attr; @@ -198,7 +194,9 @@ static struct garp_attr *garp_attr_create(struct garp_applicant *app, attr->type = type; attr->dlen = len; memcpy(attr->data, data, len); - garp_attr_insert(app, attr); + + rb_link_node(&attr->node, parent, p); + rb_insert_color(&attr->node, &app->gid); return attr; } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9988d4abb37..9757c193c86 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -157,7 +157,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, skb = __vlan_hwaccel_put_tag(skb, vlan_tci); } - skb_set_dev(skb, vlan_dev_priv(dev)->real_dev); + skb->dev = vlan_dev_priv(dev)->real_dev; len = skb->len; if (netpoll_tx_running(dev)) return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 0906c194a41..9d9a6a3edbd 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -2011,16 +2011,17 @@ static void __exit ax25_exit(void) proc_net_remove(&init_net, "ax25_route"); proc_net_remove(&init_net, "ax25"); proc_net_remove(&init_net, "ax25_calls"); - ax25_rt_free(); - ax25_uid_free(); - ax25_dev_free(); - ax25_unregister_sysctl(); unregister_netdevice_notifier(&ax25_dev_notifier); + ax25_unregister_sysctl(); dev_remove_pack(&ax25_packet_type); sock_unregister(PF_AX25); proto_unregister(&ax25_proto); + + ax25_rt_free(); + ax25_uid_free(); + ax25_dev_free(); } module_exit(ax25_exit); diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 72eb187a5f6..6fb68a9743a 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa sk->sk_state == BT_CONFIG) return mask; - if (sock_writeable(sk)) + if (!bt_sk(sk)->suspended && sock_writeable(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index e33af63a884..d6dc44cd15b 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -665,6 +665,11 @@ int hci_dev_open(__u16 dev) hci_req_lock(hdev); + if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) { + ret = -ENODEV; + goto done; + } + if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { ret = -ERFKILL; 
goto done; @@ -1210,40 +1215,40 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) return NULL; } -static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, +static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, u8 key_type, u8 old_key_type) { /* Legacy key */ if (key_type < 0x03) - return 1; + return true; /* Debug keys are insecure so don't store them persistently */ if (key_type == HCI_LK_DEBUG_COMBINATION) - return 0; + return false; /* Changed combination key and there's no previous one */ if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) - return 0; + return false; /* Security mode 3 case */ if (!conn) - return 1; + return true; /* Neither local nor remote side had no-bonding as requirement */ if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) - return 1; + return true; /* Local side had dedicated bonding as requirement */ if (conn->auth_type == 0x02 || conn->auth_type == 0x03) - return 1; + return true; /* Remote side had dedicated bonding as requirement */ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) - return 1; + return true; /* If none of the above criteria match, then don't store the key * persistently */ - return 0; + return false; } struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) @@ -1280,7 +1285,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) { struct link_key *key, *old_key; - u8 old_key_type, persistent; + u8 old_key_type; + bool persistent; old_key = hci_find_link_key(hdev, bdaddr); if (old_key) { @@ -1323,10 +1329,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, mgmt_new_link_key(hdev, key, persistent); - if (!persistent) { - list_del(&key->list); - kfree(key); - } + if (conn) + conn->flush_key = !persistent; return 0; } @@ -1849,6 +1853,8 @@ void hci_unregister_dev(struct hci_dev *hdev) BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + set_bit(HCI_UNREGISTER, &hdev->dev_flags); + write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); @@ -2778,6 +2784,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) if (conn) { hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); + hci_dev_lock(hdev); + if (test_bit(HCI_MGMT, &hdev->dev_flags) && + !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + mgmt_device_connected(hdev, &conn->dst, conn->type, + conn->dst_type, 0, NULL, 0, + conn->dev_class); + hci_dev_unlock(hdev); + /* Send to upper protocol */ l2cap_recv_acldata(conn, skb, flags); return; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index b37531094c4..1266f78fa8e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1901,6 +1901,8 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff } if (ev->status == 0) { + if (conn->type == ACL_LINK && conn->flush_key) + hci_remove_link_key(hdev, &conn->dst); hci_proto_disconn_cfm(conn, ev->reason); hci_conn_del(conn); } @@ -2037,6 +2039,12 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff * clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + if (ev->status && conn->state == BT_CONNECTED) { + hci_acl_disconn(conn, 0x13); + hci_conn_put(conn); + goto unlock; + } + if (conn->state == BT_CONFIG) { if (!ev->status) conn->state = BT_CONNECTED; @@ -2047,6 +2055,7 @@ static inline void 
hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff * hci_encrypt_cfm(conn, ev->status, ev->encrypt); } +unlock: hci_dev_unlock(hdev); } @@ -2100,7 +2109,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff goto unlock; } - if (!ev->status) { + if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { struct hci_cp_remote_name_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); @@ -2311,6 +2320,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk case HCI_OP_USER_PASSKEY_NEG_REPLY: hci_cc_user_passkey_neg_reply(hdev, skb); + break; case HCI_OP_LE_SET_SCAN_PARAM: hci_cc_le_set_scan_param(hdev, skb); @@ -2868,7 +2878,7 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b if (conn->state != BT_CONFIG) goto unlock; - if (!ev->status) { + if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { struct hci_cp_remote_name_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index b8e17e4dac8..6f9c25b633a 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1308,6 +1308,7 @@ static void l2cap_monitor_timeout(struct work_struct *work) if (chan->retry_count >= chan->remote_max_tx) { l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return; } @@ -1316,6 +1317,7 @@ static void l2cap_monitor_timeout(struct work_struct *work) l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } static void l2cap_retrans_timeout(struct work_struct *work) @@ -1335,6 +1337,7 @@ static void l2cap_retrans_timeout(struct work_struct *work) l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } static void l2cap_drop_acked_frames(struct l2cap_chan *chan) @@ -4586,6 +4589,11 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) if (!status && (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)) { + struct sock *sk = chan->sk; + + bt_sk(sk)->suspended = false; + sk->sk_state_change(sk); + l2cap_check_encryption(chan, encrypt); l2cap_chan_unlock(chan); continue; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index c4fe583b0af..04e7c172d49 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -82,7 +82,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) } if (la.l2_cid) - err = l2cap_add_scid(chan, la.l2_cid); + err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid)); else err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm); @@ -123,7 +123,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al if (la.l2_cid && la.l2_psm) return -EINVAL; - err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr); + err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), + &la.l2_bdaddr); if (err) return err; @@ -591,10 +592,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch sk->sk_state = BT_CONFIG; chan->state = BT_CONFIG; - /* or for ACL link, under defer_setup time */ - } else if (sk->sk_state == BT_CONNECT2 && - bt_sk(sk)->defer_setup) { - err = l2cap_chan_check_security(chan); + /* or for ACL link */ + } else if ((sk->sk_state == BT_CONNECT2 && + bt_sk(sk)->defer_setup) || + sk->sk_state == BT_CONNECTED) { + if (!l2cap_chan_check_security(chan)) + bt_sk(sk)->suspended = 
true; + else + sk->sk_state_change(sk); } else { err = -EINVAL; } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 7fcff888713..4bb03b11112 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2523,13 +2523,18 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, if (cp->val) { type = PAGE_SCAN_TYPE_INTERLACED; - acp.interval = 0x0024; /* 22.5 msec page scan interval */ + + /* 22.5 msec page scan interval */ + acp.interval = __constant_cpu_to_le16(0x0024); } else { type = PAGE_SCAN_TYPE_STANDARD; /* default */ - acp.interval = 0x0800; /* default 1.28 sec page scan */ + + /* default 1.28 sec page scan */ + acp.interval = __constant_cpu_to_le16(0x0800); } - acp.window = 0x0012; /* default 11.25 msec page scan window */ + /* default 11.25 msec page scan window */ + acp.window = __constant_cpu_to_le16(0x0012); err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), &acp); @@ -2879,7 +2884,7 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) return 0; } -int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent) +int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent) { struct mgmt_ev_new_link_key ev; @@ -2936,7 +2941,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, name, name_len); if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0) - eir_len = eir_append_data(&ev->eir[eir_len], eir_len, + eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, dev_class, 3); put_unaligned_le16(eir_len, &ev->eir_len); diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 61f65344e71..a2098e3de50 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb) kfree_skb(skb); } else { skb_push(skb, ETH_HLEN); + br_drop_fake_rtable(skb); dev_queue_xmit(skb); } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 702a1ae9220..27ca25ed702 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -241,7 +241,6 @@ static void br_multicast_group_expired(unsigned long data) hlist_del_rcu(&mp->hlist[mdb->ver]); mdb->size--; - del_timer(&mp->query_timer); call_rcu_bh(&mp->rcu, br_multicast_free_group); out: @@ -271,7 +270,6 @@ static void br_multicast_del_pg(struct net_bridge *br, rcu_assign_pointer(*pp, p->next); hlist_del_init(&p->mglist); del_timer(&p->timer); - del_timer(&p->query_timer); call_rcu_bh(&p->rcu, br_multicast_free_pg); if (!mp->ports && !mp->mglist && @@ -507,74 +505,6 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, return NULL; } -static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp) -{ - struct net_bridge *br = mp->br; - struct sk_buff *skb; - - skb = br_multicast_alloc_query(br, &mp->addr); - if (!skb) - goto timer; - - netif_rx(skb); - -timer: - if (++mp->queries_sent < br->multicast_last_member_count) - mod_timer(&mp->query_timer, - jiffies + br->multicast_last_member_interval); -} - -static void br_multicast_group_query_expired(unsigned long data) -{ - struct net_bridge_mdb_entry *mp = (void *)data; - struct net_bridge *br = mp->br; - - spin_lock(&br->multicast_lock); - if (!netif_running(br->dev) || !mp->mglist || - mp->queries_sent >= br->multicast_last_member_count) - goto out; - - br_multicast_send_group_query(mp); - -out: - spin_unlock(&br->multicast_lock); -} - -static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg) -{ - struct 
net_bridge_port *port = pg->port; - struct net_bridge *br = port->br; - struct sk_buff *skb; - - skb = br_multicast_alloc_query(br, &pg->addr); - if (!skb) - goto timer; - - br_deliver(port, skb); - -timer: - if (++pg->queries_sent < br->multicast_last_member_count) - mod_timer(&pg->query_timer, - jiffies + br->multicast_last_member_interval); -} - -static void br_multicast_port_group_query_expired(unsigned long data) -{ - struct net_bridge_port_group *pg = (void *)data; - struct net_bridge_port *port = pg->port; - struct net_bridge *br = port->br; - - spin_lock(&br->multicast_lock); - if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) || - pg->queries_sent >= br->multicast_last_member_count) - goto out; - - br_multicast_send_port_group_query(pg); - -out: - spin_unlock(&br->multicast_lock); -} - static struct net_bridge_mdb_entry *br_multicast_get_group( struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group, int hash) @@ -690,8 +620,6 @@ rehash: mp->addr = *group; setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp); - setup_timer(&mp->query_timer, br_multicast_group_query_expired, - (unsigned long)mp); hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); mdb->size++; @@ -746,8 +674,6 @@ static int br_multicast_add_group(struct net_bridge *br, hlist_add_head(&p->mglist, &port->mglist); setup_timer(&p->timer, br_multicast_port_group_expired, (unsigned long)p); - setup_timer(&p->query_timer, br_multicast_port_group_query_expired, - (unsigned long)p); rcu_assign_pointer(*pp, p); @@ -1291,9 +1217,6 @@ static void br_multicast_leave_group(struct net_bridge *br, time_after(mp->timer.expires, time) : try_to_del_timer_sync(&mp->timer) >= 0)) { mod_timer(&mp->timer, time); - - mp->queries_sent = 0; - mod_timer(&mp->query_timer, now); } goto out; @@ -1310,9 +1233,6 @@ static void br_multicast_leave_group(struct net_bridge *br, time_after(p->timer.expires, time) : try_to_del_timer_sync(&p->timer) >= 0)) { mod_timer(&p->timer, time); - - p->queries_sent = 0; - mod_timer(&p->query_timer, now); } break; @@ -1681,7 +1601,6 @@ void br_multicast_stop(struct net_bridge *br) hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], hlist[ver]) { del_timer(&mp->timer); - del_timer(&mp->query_timer); call_rcu_bh(&mp->rcu, br_multicast_free_group); } } diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index dec4f381713..d7f49b63ab0 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -156,7 +156,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) rt->dst.dev = br->dev; rt->dst.path = &rt->dst; dst_init_metrics(&rt->dst, br_dst_default_metrics, true); - rt->dst.flags = DST_NOXFRM | DST_NOPEER; + rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE; rt->dst.ops = &fake_dst_ops; } @@ -694,11 +694,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb, const struct net_device *out, int (*okfn)(struct sk_buff *)) { - struct rtable *rt = skb_rtable(skb); - - if (rt && rt == bridge_parent_rtable(in)) - skb_dst_drop(skb); - + br_drop_fake_rtable(skb); return NF_ACCEPT; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 0b67a63ad7a..e1d88225787 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -82,9 +82,7 @@ struct net_bridge_port_group { struct hlist_node mglist; struct rcu_head rcu; struct timer_list timer; - struct timer_list query_timer; struct br_ip addr; - u32 queries_sent; }; struct net_bridge_mdb_entry @@ -94,10 +92,8 @@ struct net_bridge_mdb_entry struct 
net_bridge_port_group __rcu *ports; struct rcu_head rcu; struct timer_list timer; - struct timer_list query_timer; struct br_ip addr; bool mglist; - u32 queries_sent; }; struct net_bridge_mdb_htable diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 20618dd3088..d09340e1523 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -103,6 +103,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) skb->protocol = htons(ETH_P_IPV6); break; default: + kfree_skb(skb); priv->netdev->stats.rx_errors++; return -EINVAL; } @@ -220,14 +221,16 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->len > priv->netdev->mtu) { pr_warn("Size of skb exceeded MTU\n"); + kfree_skb(skb); dev->stats.tx_errors++; - return -ENOSPC; + return NETDEV_TX_OK; } if (!priv->flowenabled) { pr_debug("dropping packets flow off\n"); + kfree_skb(skb); dev->stats.tx_dropped++; - return NETDEV_TX_BUSY; + return NETDEV_TX_OK; } if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) @@ -242,7 +245,7 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev) result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); if (result) { dev->stats.tx_dropped++; - return result; + return NETDEV_TX_OK; } /* Update statistics. */ diff --git a/net/core/dev.c b/net/core/dev.c index 5d59155adf2..99e1d759f41 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1409,14 +1409,34 @@ EXPORT_SYMBOL(register_netdevice_notifier); * register_netdevice_notifier(). The notifier is unlinked into the * kernel structures and may then be reused. A negative errno code * is returned on a failure. + * + * After unregistering unregister and down device events are synthesized + * for all devices on the device list to the removed notifier to remove + * the need for special case cleanup code. */ int unregister_netdevice_notifier(struct notifier_block *nb) { + struct net_device *dev; + struct net *net; int err; rtnl_lock(); err = raw_notifier_chain_unregister(&netdev_chain, nb); + if (err) + goto unlock; + + for_each_net(net) { + for_each_netdev(net, dev) { + if (dev->flags & IFF_UP) { + nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); + nb->notifier_call(nb, NETDEV_DOWN, dev); + } + nb->notifier_call(nb, NETDEV_UNREGISTER, dev); + nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); + } + } +unlock: rtnl_unlock(); return err; } @@ -1596,10 +1616,15 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) kfree_skb(skb); return NET_RX_DROP; } - skb_set_dev(skb, dev); + skb->skb_iif = 0; + skb->dev = dev; + skb_dst_drop(skb); skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, dev); + skb->mark = 0; + secpath_reset(skb); + nf_reset(skb); return netif_rx(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); @@ -1848,36 +1873,6 @@ void netif_device_attach(struct net_device *dev) } EXPORT_SYMBOL(netif_device_attach); -/** - * skb_dev_set -- assign a new device to a buffer - * @skb: buffer for the new device - * @dev: network device - * - * If an skb is owned by a device already, we have to reset - * all data private to the namespace a device belongs to - * before assigning it a new device. 
- */ -#ifdef CONFIG_NET_NS -void skb_set_dev(struct sk_buff *skb, struct net_device *dev) -{ - skb_dst_drop(skb); - if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) { - secpath_reset(skb); - nf_reset(skb); - skb_init_secmark(skb); - skb->mark = 0; - skb->priority = 0; - skb->nf_trace = 0; - skb->ipvs_property = 0; -#ifdef CONFIG_NET_SCHED - skb->tc_index = 0; -#endif - } - skb->dev = dev; -} -EXPORT_SYMBOL(skb_set_dev); -#endif /* CONFIG_NET_NS */ - static void skb_warn_bad_offload(const struct sk_buff *skb) { static const netdev_features_t null_features = 0; @@ -4027,54 +4022,41 @@ static int dev_ifconf(struct net *net, char __user *arg) #ifdef CONFIG_PROC_FS -#define BUCKET_SPACE (32 - NETDEV_HASHBITS) - -struct dev_iter_state { - struct seq_net_private p; - unsigned int pos; /* bucket << BUCKET_SPACE + offset */ -}; +#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1) #define get_bucket(x) ((x) >> BUCKET_SPACE) #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1)) #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) -static inline struct net_device *dev_from_same_bucket(struct seq_file *seq) +static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos) { - struct dev_iter_state *state = seq->private; struct net *net = seq_file_net(seq); struct net_device *dev; struct hlist_node *p; struct hlist_head *h; - unsigned int count, bucket, offset; + unsigned int count = 0, offset = get_offset(*pos); - bucket = get_bucket(state->pos); - offset = get_offset(state->pos); - h = &net->dev_name_head[bucket]; - count = 0; + h = &net->dev_name_head[get_bucket(*pos)]; hlist_for_each_entry_rcu(dev, p, h, name_hlist) { - if (count++ == offset) { - state->pos = set_bucket_offset(bucket, count); + if (++count == offset) return dev; - } } return NULL; } -static inline struct net_device *dev_from_new_bucket(struct seq_file *seq) +static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos) { - struct dev_iter_state *state = seq->private; struct net_device *dev; unsigned int bucket; - bucket = get_bucket(state->pos); do { - dev = dev_from_same_bucket(seq); + dev = dev_from_same_bucket(seq, pos); if (dev) return dev; - bucket++; - state->pos = set_bucket_offset(bucket, 0); + bucket = get_bucket(*pos) + 1; + *pos = set_bucket_offset(bucket, 1); } while (bucket < NETDEV_HASHENTRIES); return NULL; @@ -4087,33 +4069,20 @@ static inline struct net_device *dev_from_new_bucket(struct seq_file *seq) void *dev_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { - struct dev_iter_state *state = seq->private; - rcu_read_lock(); if (!*pos) return SEQ_START_TOKEN; - /* check for end of the hash */ - if (state->pos == 0 && *pos > 1) + if (get_bucket(*pos) >= NETDEV_HASHENTRIES) return NULL; - return dev_from_new_bucket(seq); + return dev_from_bucket(seq, pos); } void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct net_device *dev; - ++*pos; - - if (v == SEQ_START_TOKEN) - return dev_from_new_bucket(seq); - - dev = dev_from_same_bucket(seq); - if (dev) - return dev; - - return dev_from_new_bucket(seq); + return dev_from_bucket(seq, pos); } void dev_seq_stop(struct seq_file *seq, void *v) @@ -4212,13 +4181,7 @@ static const struct seq_operations dev_seq_ops = { static int dev_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &dev_seq_ops, - sizeof(struct dev_iter_state)); -} - -int dev_seq_open_ops(struct inode *inode, struct file *file, - const struct seq_operations *ops) -{ - return 
seq_open_net(inode, file, ops, sizeof(struct dev_iter_state)); + sizeof(struct seq_net_private)); } static const struct file_operations dev_seq_fops = { diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 29c07fef922..626698f0db8 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -696,7 +696,8 @@ static const struct seq_operations dev_mc_seq_ops = { static int dev_mc_seq_open(struct inode *inode, struct file *file) { - return dev_seq_open_ops(inode, file, &dev_mc_seq_ops); + return seq_open_net(inode, file, &dev_mc_seq_ops, + sizeof(struct seq_net_private)); } static const struct file_operations dev_mc_seq_fops = { diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 7f36b38e060..a7cad741df0 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused); * netlink alerts */ static int trace_state = TRACE_OFF; -static DEFINE_SPINLOCK(trace_state_lock); +static DEFINE_MUTEX(trace_state_mutex); struct per_cpu_dm_data { struct work_struct dm_alert_work; - struct sk_buff *skb; + struct sk_buff __rcu *skb; atomic_t dm_hit_count; struct timer_list send_timer; + int cpu; }; struct dm_hw_stat_delta { @@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data) size_t al; struct net_dm_alert_msg *msg; struct nlattr *nla; + struct sk_buff *skb; + struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1); al = sizeof(struct net_dm_alert_msg); al += dm_hit_limit * sizeof(struct net_dm_drop_point); al += sizeof(struct nlattr); - data->skb = genlmsg_new(al, GFP_KERNEL); - genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family, - 0, NET_DM_CMD_ALERT); - nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg)); - msg = nla_data(nla); - memset(msg, 0, al); - atomic_set(&data->dm_hit_count, dm_hit_limit); + skb = genlmsg_new(al, GFP_KERNEL); + + if (skb) { + genlmsg_put(skb, 0, 0, &net_drop_monitor_family, + 0, NET_DM_CMD_ALERT); + nla = nla_reserve(skb, NLA_UNSPEC, + sizeof(struct net_dm_alert_msg)); + msg = nla_data(nla); + memset(msg, 0, al); + } else + schedule_work_on(data->cpu, &data->dm_alert_work); + + /* + * Don't need to lock this, since we are guaranteed to only + * run this on a single cpu at a time. + * Note also that we only update data->skb if the old and new skb + * pointers don't match. This ensures that we don't continually call + * synchornize_rcu if we repeatedly fail to alloc a new netlink message. + */ + if (skb != oskb) { + rcu_assign_pointer(data->skb, skb); + + synchronize_rcu(); + + atomic_set(&data->dm_hit_count, dm_hit_limit); + } + } static void send_dm_alert(struct work_struct *unused) { struct sk_buff *skb; - struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); + struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); + + WARN_ON_ONCE(data->cpu != smp_processor_id()); /* * Grab the skb we're about to send */ - skb = data->skb; + skb = rcu_dereference_protected(data->skb, 1); /* * Replace it with a new one @@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused) /* * Ship it! 
*/ - genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); + if (skb) + genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); + put_cpu_var(dm_cpu_data); } /* @@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused) */ static void sched_send_work(unsigned long unused) { - struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); + struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); + + schedule_work_on(smp_processor_id(), &data->dm_alert_work); - schedule_work(&data->dm_alert_work); + put_cpu_var(dm_cpu_data); } static void trace_drop_common(struct sk_buff *skb, void *location) @@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location) struct nlmsghdr *nlh; struct nlattr *nla; int i; - struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); + struct sk_buff *dskb; + struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); + + + rcu_read_lock(); + dskb = rcu_dereference(data->skb); + if (!dskb) + goto out; if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) { /* @@ -144,12 +180,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location) goto out; } - nlh = (struct nlmsghdr *)data->skb->data; + nlh = (struct nlmsghdr *)dskb->data; nla = genlmsg_data(nlmsg_data(nlh)); msg = nla_data(nla); for (i = 0; i < msg->entries; i++) { if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { msg->points[i].count++; + atomic_inc(&data->dm_hit_count); goto out; } } @@ -157,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location) /* * We need to create a new entry */ - __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point)); + __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point)); nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); msg->points[msg->entries].count = 1; @@ -169,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location) } out: + rcu_read_unlock(); + put_cpu_var(dm_cpu_data); return; } @@ -213,7 +252,7 @@ static int set_all_monitor_traces(int state) struct dm_hw_stat_delta *new_stat = NULL; struct dm_hw_stat_delta *temp; - spin_lock(&trace_state_lock); + mutex_lock(&trace_state_mutex); if (state == trace_state) { rc = -EAGAIN; @@ -252,7 +291,7 @@ static int set_all_monitor_traces(int state) rc = -EINPROGRESS; out_unlock: - spin_unlock(&trace_state_lock); + mutex_unlock(&trace_state_mutex); return rc; } @@ -295,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block, new_stat->dev = dev; new_stat->last_rx = jiffies; - spin_lock(&trace_state_lock); + mutex_lock(&trace_state_mutex); list_add_rcu(&new_stat->list, &hw_stats_list); - spin_unlock(&trace_state_lock); + mutex_unlock(&trace_state_mutex); break; case NETDEV_UNREGISTER: - spin_lock(&trace_state_lock); + mutex_lock(&trace_state_mutex); list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { if (new_stat->dev == dev) { new_stat->dev = NULL; @@ -311,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block, } } } - spin_unlock(&trace_state_lock); + mutex_unlock(&trace_state_mutex); break; } out: @@ -367,13 +406,15 @@ static int __init init_net_drop_monitor(void) for_each_present_cpu(cpu) { data = &per_cpu(dm_cpu_data, cpu); - reset_per_cpu_data(data); + data->cpu = cpu; INIT_WORK(&data->dm_alert_work, send_dm_alert); init_timer(&data->send_timer); data->send_timer.data = cpu; data->send_timer.function = sched_send_work; + reset_per_cpu_data(data); } + goto out; out_unreg: diff --git 
a/net/core/filter.c b/net/core/filter.c index cf4989ac503..6f755cca452 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -39,8 +39,11 @@ #include <linux/reciprocal_div.h> #include <linux/ratelimit.h> -/* No hurry in this branch */ -static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) +/* No hurry in this branch + * + * Exported for the bpf jit load helper. + */ +void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size) { u8 *ptr = NULL; @@ -59,7 +62,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k, { if (k >= 0) return skb_header_pointer(skb, k, size, buffer); - return __load_pointer(skb, k, size); + return bpf_internal_load_pointer_neg_helper(skb, k, size); } /** diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 0e950fda9a0..31a5ae51a45 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -83,21 +83,29 @@ assign: static int ops_init(const struct pernet_operations *ops, struct net *net) { - int err; + int err = -ENOMEM; + void *data = NULL; + if (ops->id && ops->size) { - void *data = kzalloc(ops->size, GFP_KERNEL); + data = kzalloc(ops->size, GFP_KERNEL); if (!data) - return -ENOMEM; + goto out; err = net_assign_generic(net, *ops->id, data); - if (err) { - kfree(data); - return err; - } + if (err) + goto cleanup; } + err = 0; if (ops->init) - return ops->init(net); - return 0; + err = ops->init(net); + if (!err) + return 0; + +cleanup: + kfree(data); + +out: + return err; } static void ops_free(const struct pernet_operations *ops, struct net *net) @@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops) static int __register_pernet_operations(struct list_head *list, struct pernet_operations *ops) { - int err = 0; - err = ops_init(ops, &init_net); - if (err) - ops_free(ops, &init_net); - return err; - + return ops_init(ops, &init_net); } static void __unregister_pernet_operations(struct pernet_operations *ops) diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 4d8ce93cd50..77a59980b57 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1931,7 +1931,7 @@ static int pktgen_device_event(struct notifier_block *unused, { struct net_device *dev = ptr; - if (!net_eq(dev_net(dev), &init_net)) + if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting) return NOTIFY_DONE; /* It is OK that we do not hold the group lock right now, @@ -3755,12 +3755,18 @@ static void __exit pg_cleanup(void) { struct pktgen_thread *t; struct list_head *q, *n; + struct list_head list; /* Stop all interfaces & threads */ pktgen_exiting = true; - list_for_each_safe(q, n, &pktgen_threads) { + mutex_lock(&pktgen_thread_lock); + list_splice(&list, &pktgen_threads); + mutex_unlock(&pktgen_thread_lock); + + list_for_each_safe(q, n, &list) { t = list_entry(q, struct pktgen_thread, th_list); + list_del(&t->th_list); kthread_stop(t->tsk); kfree(t); } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f223cdc75da..e59840010d4 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -952,9 +952,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, goto adjust_others; } - data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); + data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), + gfp_mask); if (!data) goto nodata; + size = SKB_WITH_OVERHEAD(ksize(data)); /* Copy only real data... and, alas, header. This should be * optimized for the cases when header is void. 
@@ -3161,6 +3163,8 @@ static void sock_rmem_free(struct sk_buff *skb) */ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) { + int len = skb->len; + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned)sk->sk_rcvbuf) return -ENOMEM; @@ -3175,7 +3179,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) skb_queue_tail(&sk->sk_error_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_data_ready(sk, skb->len); + sk->sk_data_ready(sk, len); return 0; } EXPORT_SYMBOL(sock_queue_err_skb); diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 36851588536..840821b90bc 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -1044,6 +1044,24 @@ static void lowpan_dev_free(struct net_device *dev) free_netdev(dev); } +static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) +{ + struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; + return ieee802154_mlme_ops(real_dev)->get_phy(real_dev); +} + +static u16 lowpan_get_pan_id(const struct net_device *dev) +{ + struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; + return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev); +} + +static u16 lowpan_get_short_addr(const struct net_device *dev) +{ + struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; + return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); +} + static struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; @@ -1053,6 +1071,12 @@ static const struct net_device_ops lowpan_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; +static struct ieee802154_mlme_ops lowpan_mlme = { + .get_pan_id = lowpan_get_pan_id, + .get_phy = lowpan_get_phy, + .get_short_addr = lowpan_get_short_addr, +}; + static void lowpan_setup(struct net_device *dev) { pr_debug("(%s)\n", __func__); @@ -1070,6 +1094,7 @@ static void lowpan_setup(struct net_device *dev) dev->netdev_ops = &lowpan_netdev_ops; dev->header_ops = &lowpan_header_ops; + dev->ml_priv = &lowpan_mlme; dev->destructor = lowpan_dev_free; } @@ -1143,6 +1168,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, list_add_tail(&entry->list, &lowpan_devices); mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); + spin_lock_init(&flist_lock); + register_netdevice(dev); return 0; @@ -1152,11 +1179,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head) { struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); struct net_device *real_dev = lowpan_dev->real_dev; - struct lowpan_dev_record *entry; - struct lowpan_dev_record *tmp; + struct lowpan_dev_record *entry, *tmp; + struct lowpan_fragment *frame, *tframe; ASSERT_RTNL(); + spin_lock(&flist_lock); + list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) { + del_timer(&frame->timer); + list_del(&frame->list); + dev_kfree_skb(frame->skb); + kfree(frame); + } + spin_unlock(&flist_lock); + mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { if (entry->ldev == dev) { diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index bce36f1a37b..30b88d7b4bd 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1370,6 +1370,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l, if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) continue; + if (fi->fib_dead) + continue; if (fa->fa_info->fib_scope < flp->flowi4_scope) continue; fib_alias_accessed(fa); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 8d25a1c557e..8f8db724bfa 100644 
--- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, goto rtattr_failure; if (icsk == NULL) { - r->idiag_rqueue = r->idiag_wqueue = 0; + handler->idiag_get_info(sk, r, NULL); goto out; } diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index de9da21113a..cf73cc70ed2 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -74,16 +74,24 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); if (iph == NULL) - return -NF_DROP; + return -NF_ACCEPT; /* Conntrack defragments packets, we might still see fragments * inside ICMP packets though. */ if (iph->frag_off & htons(IP_OFFSET)) - return -NF_DROP; + return -NF_ACCEPT; *dataoff = nhoff + (iph->ihl << 2); *protonum = iph->protocol; + /* Check bogus IP headers */ + if (*dataoff > skb->len) { + pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: " + "nhoff %u, ihl %u, skblen %u\n", + nhoff, iph->ihl << 2, skb->len); + return -NF_ACCEPT; + } + return NF_ACCEPT; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 4dc1c104c94..167ea10b521 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2041,7 +2041,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (err < 0) goto e_err; } - rth = rt_dst_alloc(init_net.loopback_dev, + rth = rt_dst_alloc(dev_net(dev)->loopback_dev, IN_DEV_CONF_GET(in_dev, NOPOLICY), false); if (!rth) goto e_nobufs; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index cfd7edda0a8..1272a88c2a6 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -701,11 +701,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); if (skb) { if (sk_wmem_schedule(sk, skb->truesize)) { + skb_reserve(skb, sk->sk_prot->max_header); /* * Make sure that we have exactly size bytes * available to the caller, no more, no less. */ - skb_reserve(skb, skb_tailroom(skb) - size); + skb->avail_size = size; return skb; } __kfree_skb(skb); @@ -860,7 +861,7 @@ wait_for_memory: } out: - if (copied) + if (copied && !(flags & MSG_SENDPAGE_NOTLAST)) tcp_push(sk, flags, mss_now, tp->nonagle); return copied; @@ -995,10 +996,9 @@ new_segment: copy = seglen; /* Where to copy to? */ - if (skb_tailroom(skb) > 0) { + if (skb_availroom(skb) > 0) { /* We have some space in skb head. Superb! 
*/ - if (copy > skb_tailroom(skb)) - copy = skb_tailroom(skb); + copy = min_t(int, copy, skb_availroom(skb)); err = skb_add_data_nocache(sk, skb, from, copy); if (err) goto do_fault; @@ -1452,7 +1452,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if ((available < target) && (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && !sysctl_tcp_low_latency && - dma_find_channel(DMA_MEMCPY)) { + net_dma_find_channel()) { preempt_enable_no_resched(); tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); @@ -1667,7 +1667,7 @@ do_prequeue: if (!(flags & MSG_TRUNC)) { #ifdef CONFIG_NET_DMA if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) - tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); + tp->ucopy.dma_chan = net_dma_find_channel(); if (tp->ucopy.dma_chan) { tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( @@ -3243,7 +3243,7 @@ void __init tcp_init(void) { struct sk_buff *skb = NULL; unsigned long limit; - int max_share, cnt; + int max_rshare, max_wshare, cnt; unsigned int i; unsigned long jiffy = jiffies; @@ -3302,17 +3302,17 @@ void __init tcp_init(void) tcp_init_mem(&init_net); /* Set per-socket limits to no more than 1/128 the pressure threshold */ - limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10); - limit = max(limit, 128UL); - max_share = min(4UL*1024*1024, limit); + limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); + max_wshare = min(4UL*1024*1024, limit); + max_rshare = min(6UL*1024*1024, limit); sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; sysctl_tcp_wmem[1] = 16*1024; - sysctl_tcp_wmem[2] = max(64*1024, max_share); + sysctl_tcp_wmem[2] = max(64*1024, max_wshare); sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; sysctl_tcp_rmem[1] = 87380; - sysctl_tcp_rmem[2] = max(87380, max_share); + sysctl_tcp_rmem[2] = max(87380, max_rshare); pr_info("Hash tables configured (established %u bind %u)\n", tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e886e2f7fa8..257b61789ee 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2; EXPORT_SYMBOL(sysctl_tcp_ecn); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; -int sysctl_tcp_adv_win_scale __read_mostly = 2; +int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); int sysctl_tcp_stdurg __read_mostly; @@ -335,6 +335,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) incr = __tcp_grow_window(sk, skb); if (incr) { + incr = max_t(int, incr, 2 * skb->len); tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); inet_csk(sk)->icsk_ack.quick |= 1; @@ -474,8 +475,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) if (!win_dep) { m -= (new_sample >> 3); new_sample += m; - } else if (m < new_sample) - new_sample = m << 3; + } else { + m <<= 3; + if (m < new_sample) + new_sample = m; + } } else { /* No previous measure. */ new_sample = m << 3; @@ -491,7 +495,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; - tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1); + tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); new_measure: tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; @@ -2864,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk) /* Do not moderate cwnd if it's already undone in cwr or recovery. 
*/ if (tp->undo_marker) { - if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) + if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) { tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); - else /* PRR */ + tp->snd_cwnd_stamp = tcp_time_stamp; + } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) { + /* PRR algorithm. */ tp->snd_cwnd = tp->snd_ssthresh; - tp->snd_cwnd_stamp = tcp_time_stamp; + tp->snd_cwnd_stamp = tcp_time_stamp; + } } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); } @@ -5225,7 +5232,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, return 0; if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) - tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); + tp->ucopy.dma_chan = net_dma_find_channel(); if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3a25cf743f8..0cb86ceb652 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1730,7 +1730,7 @@ process: #ifdef CONFIG_NET_DMA struct tcp_sock *tp = tcp_sk(sk); if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) - tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); + tp->ucopy.dma_chan = net_dma_find_channel(); if (tp->ucopy.dma_chan) ret = tcp_v4_do_rcv(sk, skb); else diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 364784a9193..7ac6423117a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1096,6 +1096,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) eat = min_t(int, len, skb_headlen(skb)); if (eat) { __skb_pull(skb, eat); + skb->avail_size -= eat; len -= eat; if (!len) return; @@ -2060,7 +2061,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, /* Punt if not enough space exists in the first SKB for * the data in the second */ - if (skb->len > skb_tailroom(to)) + if (skb->len > skb_availroom(to)) break; if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 8a949f19deb..a7f86a3cd50 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, return udp_dump_one(&udp_table, in_skb, nlh, req); } +static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, + void *info) +{ + r->idiag_rqueue = sk_rmem_alloc_get(sk); + r->idiag_wqueue = sk_wmem_alloc_get(sk); +} + static const struct inet_diag_handler udp_diag_handler = { .dump = udp_diag_dump, .dump_one = udp_diag_dump_one, + .idiag_get_info = udp_diag_get_info, .idiag_type = IPPROTO_UDP, }; @@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr * static const struct inet_diag_handler udplite_diag_handler = { .dump = udplite_diag_dump, .dump_one = udplite_diag_dump_one, + .idiag_get_info = udp_diag_get_info, .idiag_type = IPPROTO_UDPLITE, }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6a3bb6077e1..7d5cb975cc6 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -803,8 +803,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) ip6_del_rt(rt); rt = NULL; } else if (!(rt->rt6i_flags & RTF_EXPIRES)) { - rt->dst.expires = expires; - rt->rt6i_flags |= RTF_EXPIRES; + rt6_set_expires(rt, expires); } } dst_release(&rt->dst); @@ -1887,11 +1886,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) rt = NULL; } else if (addrconf_finite_timeout(rt_expires)) { /* not infinity */ - rt->dst.expires = jiffies + rt_expires; - rt->rt6i_flags |= RTF_EXPIRES; + rt6_set_expires(rt, 
jiffies + rt_expires); } else { - rt->rt6i_flags &= ~RTF_EXPIRES; - rt->dst.expires = 0; + rt6_clean_expires(rt); } } else if (valid_lft) { clock_t expires = 0; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 5b27fbcae34..93717435013 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -673,11 +673,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, &rt->rt6i_gateway)) { if (!(iter->rt6i_flags & RTF_EXPIRES)) return -EEXIST; - iter->dst.expires = rt->dst.expires; - if (!(rt->rt6i_flags & RTF_EXPIRES)) { - iter->rt6i_flags &= ~RTF_EXPIRES; - iter->dst.expires = 0; - } + if (!(rt->rt6i_flags & RTF_EXPIRES)) + rt6_clean_expires(iter); + else + rt6_set_expires(iter, rt->dst.expires); return -EEXIST; } } diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 16c33e30812..b2869cab209 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2044,7 +2044,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, if (!delta) pmc->mca_sfcount[sfmode]--; for (j=0; j<i; j++) - (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]); + ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]); } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) { struct ip6_sf_list *psf; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3dcdb81ec3e..176b469322a 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1264,8 +1264,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) } if (rt) - rt->dst.expires = jiffies + (HZ * lifetime); - + rt6_set_expires(rt, jiffies + (HZ * lifetime)); if (ra_msg->icmph.icmp6_hop_limit) { in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; if (rt) diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 94874b0bdcd..9d4e1555931 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -78,19 +78,6 @@ EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table); Hence the start of any table is given by get_table() below. */ -/* Check for an extension */ -int -ip6t_ext_hdr(u8 nexthdr) -{ - return (nexthdr == IPPROTO_HOPOPTS) || - (nexthdr == IPPROTO_ROUTING) || - (nexthdr == IPPROTO_FRAGMENT) || - (nexthdr == IPPROTO_ESP) || - (nexthdr == IPPROTO_AH) || - (nexthdr == IPPROTO_NONE) || - (nexthdr == IPPROTO_DSTOPTS); -} - /* Returns whether matches rule or not. 
*/ /* Performance critical - called for every packet */ static inline bool @@ -2366,7 +2353,6 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, EXPORT_SYMBOL(ip6t_register_table); EXPORT_SYMBOL(ip6t_unregister_table); EXPORT_SYMBOL(ip6t_do_table); -EXPORT_SYMBOL(ip6t_ext_hdr); EXPORT_SYMBOL(ipv6_find_hdr); module_init(ip6_tables_init); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 496b62712fe..bc4888d902b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -62,7 +62,7 @@ #include <linux/sysctl.h> #endif -static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, +static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, const struct in6_addr *dest); static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); static unsigned int ip6_default_advmss(const struct dst_entry *dst); @@ -285,6 +285,10 @@ static void ip6_dst_destroy(struct dst_entry *dst) rt->rt6i_idev = NULL; in6_dev_put(idev); } + + if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from) + dst_release(dst->from); + if (peer) { rt->rt6i_peer = NULL; inet_putpeer(peer); @@ -329,8 +333,17 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, static __inline__ int rt6_check_expired(const struct rt6_info *rt) { - return (rt->rt6i_flags & RTF_EXPIRES) && - time_after(jiffies, rt->dst.expires); + struct rt6_info *ort = NULL; + + if (rt->rt6i_flags & RTF_EXPIRES) { + if (time_after(jiffies, rt->dst.expires)) + return 1; + } else if (rt->dst.from) { + ort = (struct rt6_info *) rt->dst.from; + return (ort->rt6i_flags & RTF_EXPIRES) && + time_after(jiffies, ort->dst.expires); + } + return 0; } static inline int rt6_need_strict(const struct in6_addr *daddr) @@ -620,12 +633,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); if (rt) { - if (!addrconf_finite_timeout(lifetime)) { - rt->rt6i_flags &= ~RTF_EXPIRES; - } else { - rt->dst.expires = jiffies + HZ * lifetime; - rt->rt6i_flags |= RTF_EXPIRES; - } + if (!addrconf_finite_timeout(lifetime)) + rt6_clean_expires(rt); + else + rt6_set_expires(rt, jiffies + HZ * lifetime); + dst_release(&rt->dst); } return 0; @@ -730,7 +742,7 @@ int ip6_ins_rt(struct rt6_info *rt) return __ip6_ins_rt(rt, &info); } -static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, +static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_addr *daddr, const struct in6_addr *saddr) { @@ -881,6 +893,16 @@ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table * return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags); } +static struct dst_entry *ip6_route_input_lookup(struct net *net, + struct net_device *dev, + struct flowi6 *fl6, int flags) +{ + if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG) + flags |= RT6_LOOKUP_F_IFACE; + + return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input); +} + void ip6_route_input(struct sk_buff *skb) { const struct ipv6hdr *iph = ipv6_hdr(skb); @@ -895,10 +917,7 @@ void ip6_route_input(struct sk_buff *skb) .flowi6_proto = iph->nexthdr, }; - if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG) - flags |= RT6_LOOKUP_F_IFACE; - - skb_dst_set(skb, fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_input)); + skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags)); } static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, @@ -947,10 +966,10 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori 
rt->rt6i_idev = ort->rt6i_idev; if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); - rt->dst.expires = 0; rt->rt6i_gateway = ort->rt6i_gateway; - rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; + rt->rt6i_flags = ort->rt6i_flags; + rt6_clean_expires(rt); rt->rt6i_metric = 0; memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); @@ -1012,10 +1031,9 @@ static void ip6_link_failure(struct sk_buff *skb) rt = (struct rt6_info *) skb_dst(skb); if (rt) { - if (rt->rt6i_flags & RTF_CACHE) { - dst_set_expires(&rt->dst, 0); - rt->rt6i_flags |= RTF_EXPIRES; - } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) + if (rt->rt6i_flags & RTF_CACHE) + rt6_update_expires(rt, 0); + else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) rt->rt6i_node->fn_sernum = -1; } } @@ -1282,9 +1300,12 @@ int ip6_route_add(struct fib6_config *cfg) } rt->dst.obsolete = -1; - rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ? - jiffies + clock_t_to_jiffies(cfg->fc_expires) : - 0; + + if (cfg->fc_flags & RTF_EXPIRES) + rt6_set_expires(rt, jiffies + + clock_t_to_jiffies(cfg->fc_expires)); + else + rt6_clean_expires(rt); if (cfg->fc_protocol == RTPROT_UNSPEC) cfg->fc_protocol = RTPROT_BOOT; @@ -1729,8 +1750,8 @@ again: features |= RTAX_FEATURE_ALLFRAG; dst_metric_set(&rt->dst, RTAX_FEATURES, features); } - dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); - rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; + rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); + rt->rt6i_flags |= RTF_MODIFIED; goto out; } @@ -1758,9 +1779,8 @@ again: * which is 10 mins. After 10 mins the decreased pmtu is expired * and detecting PMTU increase will be automatically happened. */ - dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); - nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; - + rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires); + nrt->rt6i_flags |= RTF_DYNAMIC; ip6_ins_rt(nrt); } out: @@ -1792,7 +1812,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad * Misc support functions */ -static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, +static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, const struct in6_addr *dest) { struct net *net = dev_net(ort->dst.dev); @@ -1812,10 +1832,14 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); rt->dst.lastuse = jiffies; - rt->dst.expires = 0; rt->rt6i_gateway = ort->rt6i_gateway; - rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; + rt->rt6i_flags = ort->rt6i_flags; + if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == + (RTF_DEFAULT | RTF_ADDRCONF)) + rt6_set_from(rt, ort); + else + rt6_clean_expires(rt); rt->rt6i_metric = 0; #ifdef CONFIG_IPV6_SUBTREES @@ -2537,7 +2561,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void struct sk_buff *skb; struct rtmsg *rtm; struct flowi6 fl6; - int err, iif = 0; + int err, iif = 0, oif = 0; err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); if (err < 0) @@ -2564,15 +2588,29 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void iif = nla_get_u32(tb[RTA_IIF]); if (tb[RTA_OIF]) - fl6.flowi6_oif = nla_get_u32(tb[RTA_OIF]); + oif = nla_get_u32(tb[RTA_OIF]); if (iif) { struct net_device *dev; + int flags = 0; + dev = __dev_get_by_index(net, iif); if (!dev) { err = -ENODEV; goto errout; } + + fl6.flowi6_iif = iif; + + if (!ipv6_addr_any(&fl6.saddr)) + flags |= RT6_LOOKUP_F_HAS_SADDR; + + rt = (struct rt6_info 
*)ip6_route_input_lookup(net, dev, &fl6, + flags); + } else { + fl6.flowi6_oif = oif; + + rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); } skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); @@ -2587,7 +2625,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void skb_reset_mac_header(skb); skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); - rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl6); skb_dst_set(skb, &rt->dst); err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 12c6ece67f3..98256cf72f9 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1383,6 +1383,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, tcp_mtup_init(newsk); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = dst_metric_advmss(dst); + if (tcp_sk(sk)->rx_opt.user_mss && + tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) + newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; + tcp_initialize_rcv_mss(newsk); if (tcp_rsk(req)->snt_synack) tcp_valid_rtt_meas(newsk, @@ -1645,7 +1649,7 @@ process: #ifdef CONFIG_NET_DMA struct tcp_sock *tp = tcp_sk(sk); if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) - tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); + tp->ucopy.dma_chan = net_dma_find_channel(); if (tp->ucopy.dma_chan) ret = tcp_v6_do_rcv(sk, skb); else diff --git a/net/key/af_key.c b/net/key/af_key.c index 11dbb2255cc..7e5d927b576 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3480,7 +3480,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, /* Addresses to be used by KM for negotiation, if ext is available */ if (k != NULL && (set_sadb_kmaddress(skb, k) < 0)) - return -EINVAL; + goto err; /* selector src */ set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 55670ec3cd0..6274f0be82b 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -232,7 +232,7 @@ static void l2tp_ip_close(struct sock *sk, long timeout) { write_lock_bh(&l2tp_ip_lock); hlist_del_init(&sk->sk_bind_node); - hlist_del_init(&sk->sk_node); + sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); sk_common_release(sk); } @@ -271,7 +271,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; - inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; + if (addr->l2tp_addr.s_addr) + inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); @@ -441,8 +442,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m daddr = lip->l2tp_addr.s_addr; } else { + rc = -EDESTADDRREQ; if (sk->sk_state != TCP_ESTABLISHED) - return -EDESTADDRREQ; + goto out; daddr = inet->inet_daddr; connected = 1; diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 1068f668ac4..64d3ce5ea1a 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -49,6 +49,8 @@ static void ieee80211_free_tid_rx(struct rcu_head *h) container_of(h, struct tid_ampdu_rx, rcu_head); int i; + del_timer_sync(&tid_rx->reorder_timer); + for (i = 0; i < tid_rx->buf_size; i++) dev_kfree_skb(tid_rx->reorder_buf[i]); kfree(tid_rx->reorder_buf); @@ -91,7 +93,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, tid, WLAN_BACK_RECIPIENT, reason); 
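The agg-rx change in this hunk (the removal continues just below) moves del_timer_sync() for the reorder timer out of ___ieee80211_stop_rx_ba_session() and into ieee80211_free_tid_rx(), i.e. into the RCU callback, so the timer is only torn down once the grace period guarantees no reader can still arm it. A minimal sketch of that unpublish-then-call_rcu() teardown pattern follows; the struct and function names are illustrative, not mac80211's.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/slab.h>

struct reorder_state {
	struct timer_list timer;	/* readers may (re)arm this under rcu_read_lock() */
	struct rcu_head rcu_head;
};

static void reorder_state_free(struct rcu_head *h)
{
	struct reorder_state *st = container_of(h, struct reorder_state, rcu_head);

	/* Only safe here: after the grace period no reader holds the state,
	 * so nothing can re-arm the timer behind our back. */
	del_timer_sync(&st->timer);
	kfree(st);
}

static void reorder_state_release(struct reorder_state __rcu **slot)
{
	struct reorder_state *st = rcu_dereference_protected(*slot, 1);

	rcu_assign_pointer(*slot, NULL);	/* unpublish first */
	if (st)
		call_rcu(&st->rcu_head, reorder_state_free);
}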
del_timer_sync(&tid_rx->session_timer); - del_timer_sync(&tid_rx->reorder_timer); call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); } diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index cc5b7a6e7e0..778e5916d7c 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -15,12 +15,6 @@ #include "rate.h" #include "debugfs.h" -int mac80211_open_file_generic(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - #define DEBUGFS_FORMAT_BUFFER_SIZE 100 int mac80211_format_buffer(char __user *userbuf, size_t count, @@ -50,7 +44,7 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \ #define DEBUGFS_READONLY_FILE_OPS(name) \ static const struct file_operations name## _ops = { \ .read = name## _read, \ - .open = mac80211_open_file_generic, \ + .open = simple_open, \ .llseek = generic_file_llseek, \ }; @@ -93,7 +87,7 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf, static const struct file_operations reset_ops = { .write = reset_write, - .open = mac80211_open_file_generic, + .open = simple_open, .llseek = noop_llseek, }; @@ -254,7 +248,7 @@ static ssize_t stats_ ##name## _read(struct file *file, \ \ static const struct file_operations stats_ ##name## _ops = { \ .read = stats_ ##name## _read, \ - .open = mac80211_open_file_generic, \ + .open = simple_open, \ .llseek = generic_file_llseek, \ }; diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h index 7c87529630f..9be4e6d71d0 100644 --- a/net/mac80211/debugfs.h +++ b/net/mac80211/debugfs.h @@ -3,7 +3,6 @@ #ifdef CONFIG_MAC80211_DEBUGFS extern void debugfs_hw_add(struct ieee80211_local *local); -extern int mac80211_open_file_generic(struct inode *inode, struct file *file); extern int mac80211_format_buffer(char __user *userbuf, size_t count, loff_t *ppos, char *fmt, ...); #else diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 59edcd95a58..7932767bb48 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -30,7 +30,7 @@ static ssize_t key_##name##_read(struct file *file, \ #define KEY_OPS(name) \ static const struct file_operations key_ ##name## _ops = { \ .read = key_##name##_read, \ - .open = mac80211_open_file_generic, \ + .open = simple_open, \ .llseek = generic_file_llseek, \ } @@ -45,7 +45,7 @@ static const struct file_operations key_ ##name## _ops = { \ #define KEY_CONF_OPS(name) \ static const struct file_operations key_ ##name## _ops = { \ .read = key_conf_##name##_read, \ - .open = mac80211_open_file_generic, \ + .open = simple_open, \ .llseek = generic_file_llseek, \ } diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index a32eeda04aa..30f99c34484 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -135,7 +135,7 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \ static const struct file_operations name##_ops = { \ .read = ieee80211_if_read_##name, \ .write = (_write), \ - .open = mac80211_open_file_generic, \ + .open = simple_open, \ .llseek = generic_file_llseek, \ } diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 6d45804d09b..832b2da5e4c 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -33,7 +33,7 @@ static ssize_t sta_ ##name## _read(struct file *file, \ #define STA_OPS(name) \ static const struct file_operations sta_ ##name## _ops = { \ .read = sta_##name##_read, \ - .open = mac80211_open_file_generic, \ + .open = simple_open, \ .llseek = 
generic_file_llseek, \ } @@ -41,7 +41,7 @@ static const struct file_operations sta_ ##name## _ops = { \ static const struct file_operations sta_ ##name## _ops = { \ .read = sta_##name##_read, \ .write = sta_##name##_write, \ - .open = mac80211_open_file_generic, \ + .open = simple_open, \ .llseek = generic_file_llseek, \ } diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 33fd8d9f714..cef7c29214a 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -457,8 +457,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, * fall back to HT20 if we don't use or use * the other extension channel */ - if ((channel_type == NL80211_CHAN_HT40MINUS || - channel_type == NL80211_CHAN_HT40PLUS) && + if (!(channel_type == NL80211_CHAN_HT40MINUS || + channel_type == NL80211_CHAN_HT40PLUS) || channel_type != sdata->u.ibss.channel_type) sta_ht_cap_new.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index d9798a307f2..db8fae51714 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1210,7 +1210,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); -void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata); +void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 401c01f0731..c20051b7ffc 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -486,6 +486,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, /* free all potentially still buffered bcast frames */ local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf); skb_queue_purge(&sdata->u.ap.ps_bc_buf); + } else if (sdata->vif.type == NL80211_IFTYPE_STATION) { + ieee80211_mgd_stop(sdata); } if (going_down) @@ -644,8 +646,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev) if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_rmc_free(sdata); - else if (sdata->vif.type == NL80211_IFTYPE_STATION) - ieee80211_mgd_teardown(sdata); flushed = sta_info_flush(local, sdata); WARN_ON(flushed); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index b581a24fa15..16336480c63 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -102,9 +102,6 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) might_sleep(); - /* If this off-channel logic ever changes, ieee80211_on_oper_channel - * may need to change as well. 
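All of the mac80211 debugfs hunks above (and the cfg80211 ones later in this patch) drop the subsystem-local open helper in favour of the generic simple_open() from fs/libfs.c, which does the same private_data plumbing once for everyone; as far as I recall it is essentially:

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}

The only behavioural difference is the NULL check on i_private, so debugfs files created with a valid private pointer see no change.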
- */ offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; if (local->scan_channel) { chan = local->scan_channel; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 576fb25456d..20c680bfc3a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3387,8 +3387,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, */ printk(KERN_DEBUG "%s: waiting for beacon from %pM\n", sdata->name, ifmgd->bssid); - assoc_data->timeout = jiffies + - TU_TO_EXP_TIME(req->bss->beacon_interval); + assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval); } else { assoc_data->have_beacon = true; assoc_data->sent_assoc = false; @@ -3498,7 +3497,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, return 0; } -void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata) +void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index b4f7600a3e3..3313c117b32 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -145,7 +145,7 @@ static ssize_t rcname_read(struct file *file, char __user *userbuf, static const struct file_operations rcname_ops = { .read = rcname_read, - .open = mac80211_open_file_generic, + .open = simple_open, .llseek = default_llseek, }; #endif diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bcfe8c77c83..d64e285400a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -103,7 +103,7 @@ static void ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_rate *rate, - int rtap_len) + int rtap_len, bool has_fcs) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_header *rthdr; @@ -134,7 +134,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, } /* IEEE80211_RADIOTAP_FLAGS */ - if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) + if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)) *pos |= IEEE80211_RADIOTAP_F_FCS; if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) *pos |= IEEE80211_RADIOTAP_F_BADFCS; @@ -294,7 +294,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, } /* prepend radiotap information */ - ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); + ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, + true); skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -2571,7 +2572,8 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, goto out_free_skb; /* prepend radiotap information */ - ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); + ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, + false); skb_set_mac_header(skb, 0); skb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 33cd1690137..c70e1767713 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -370,7 +370,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) */ drv_sw_scan_start(local); - local->leave_oper_channel_time = 0; + local->leave_oper_channel_time = jiffies; local->next_scan_state = SCAN_DECISION; local->scan_channel_idx = 0; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 782a60198df..e76facc69e9 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1158,7 +1158,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, tx->sta = rcu_dereference(sdata->u.vlan.sta); if 
(!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) return TX_DROP; - } else if (info->flags & IEEE80211_TX_CTL_INJECTED) { + } else if (info->flags & IEEE80211_TX_CTL_INJECTED || + tx->sdata->control_port_protocol == tx->skb->protocol) { tx->sta = sta_info_get_bss(sdata, hdr->addr1); } if (!tx->sta) diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index 5139dea6019..828ce46cb34 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -364,6 +364,7 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) { u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; u8 netmask, hbits; + size_t hsize; struct ip_set_hash *h; if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) @@ -405,9 +406,12 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) h->timeout = IPSET_NO_TIMEOUT; hbits = htable_bits(hashsize); - h->table = ip_set_alloc( - sizeof(struct htable) - + jhash_size(hbits) * sizeof(struct hbucket)); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); + return -ENOMEM; + } + h->table = ip_set_alloc(hsize); if (!h->table) { kfree(h); return -ENOMEM; diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 9c27e249c17..e8dbb498af8 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -449,6 +449,7 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags) struct ip_set_hash *h; u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; u8 hbits; + size_t hsize; if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; @@ -476,9 +477,12 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags) h->timeout = IPSET_NO_TIMEOUT; hbits = htable_bits(hashsize); - h->table = ip_set_alloc( - sizeof(struct htable) - + jhash_size(hbits) * sizeof(struct hbucket)); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); + return -ENOMEM; + } + h->table = ip_set_alloc(hsize); if (!h->table) { kfree(h); return -ENOMEM; diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index 9134057c072..52f79d8ef74 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -467,6 +467,7 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) struct ip_set_hash *h; u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; u8 hbits; + size_t hsize; if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; @@ -494,9 +495,12 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) h->timeout = IPSET_NO_TIMEOUT; hbits = htable_bits(hashsize); - h->table = ip_set_alloc( - sizeof(struct htable) - + jhash_size(hbits) * sizeof(struct hbucket)); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); + return -ENOMEM; + } + h->table = ip_set_alloc(hsize); if (!h->table) { kfree(h); return -ENOMEM; diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index 5d05e696986..97583f5af74 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -616,6 +616,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags) struct ip_set_hash *h; u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = 
IPSET_DEFAULT_MAXELEM; u8 hbits; + size_t hsize; if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; @@ -645,9 +646,12 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags) h->timeout = IPSET_NO_TIMEOUT; hbits = htable_bits(hashsize); - h->table = ip_set_alloc( - sizeof(struct htable) - + jhash_size(hbits) * sizeof(struct hbucket)); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); + return -ENOMEM; + } + h->table = ip_set_alloc(hsize); if (!h->table) { kfree(h); return -ENOMEM; diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 7c3d945517c..1721cdecc9f 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -460,6 +460,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags) u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; struct ip_set_hash *h; u8 hbits; + size_t hsize; if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; @@ -489,9 +490,12 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags) h->timeout = IPSET_NO_TIMEOUT; hbits = htable_bits(hashsize); - h->table = ip_set_alloc( - sizeof(struct htable) - + jhash_size(hbits) * sizeof(struct hbucket)); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); + return -ENOMEM; + } + h->table = ip_set_alloc(hsize); if (!h->table) { kfree(h); return -ENOMEM; diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index f24037ff432..33bafc97ca6 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -722,6 +722,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags) struct ip_set_hash *h; u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; u8 hbits; + size_t hsize; if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; @@ -752,9 +753,12 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags) h->ahash_max = AHASH_MAX_SIZE; hbits = htable_bits(hashsize); - h->table = ip_set_alloc( - sizeof(struct htable) - + jhash_size(hbits) * sizeof(struct hbucket)); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); + return -ENOMEM; + } + h->table = ip_set_alloc(hsize); if (!h->table) { kfree(h); return -ENOMEM; diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index ce2e77100b6..3a5e198641d 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -572,6 +572,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags) struct ip_set_hash *h; u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; u8 hbits; + size_t hsize; if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; @@ -601,9 +602,12 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags) h->timeout = IPSET_NO_TIMEOUT; hbits = htable_bits(hashsize); - h->table = ip_set_alloc( - sizeof(struct htable) - + jhash_size(hbits) * sizeof(struct hbucket)); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); + return -ENOMEM; + } + h->table = ip_set_alloc(hsize); if (!h->table) { kfree(h); return -ENOMEM; diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 2555816e778..00bdb1d9d69 
100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1924,6 +1924,7 @@ protocol_fail: control_fail: ip_vs_estimator_net_cleanup(net); estimator_fail: + net->ipvs = NULL; return -ENOMEM; } @@ -1936,6 +1937,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net) ip_vs_control_net_cleanup(net); ip_vs_estimator_net_cleanup(net); IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); + net->ipvs = NULL; } static void __net_exit __ip_vs_dev_cleanup(struct net *net) @@ -1993,10 +1995,18 @@ static int __init ip_vs_init(void) goto cleanup_dev; } + ret = ip_vs_register_nl_ioctl(); + if (ret < 0) { + pr_err("can't register netlink/ioctl.\n"); + goto cleanup_hooks; + } + pr_info("ipvs loaded.\n"); return ret; +cleanup_hooks: + nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); cleanup_dev: unregister_pernet_device(&ipvs_core_dev_ops); cleanup_sub: @@ -2012,6 +2022,7 @@ exit: static void __exit ip_vs_cleanup(void) { + ip_vs_unregister_nl_ioctl(); nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); unregister_pernet_device(&ipvs_core_dev_ops); unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index b3afe189af6..f5589987fc8 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -3680,7 +3680,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net) return 0; } -void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) +void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -3692,7 +3692,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) #else int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; } -void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { } +void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { } #endif @@ -3750,21 +3750,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net) free_percpu(ipvs->tot_stats.cpustats); } -int __init ip_vs_control_init(void) +int __init ip_vs_register_nl_ioctl(void) { - int idx; int ret; - EnterFunction(2); - - /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */ - for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { - INIT_LIST_HEAD(&ip_vs_svc_table[idx]); - INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); - } - - smp_wmb(); /* Do we really need it now ? */ - ret = nf_register_sockopt(&ip_vs_sockopts); if (ret) { pr_err("cannot register sockopt.\n"); @@ -3776,28 +3765,47 @@ int __init ip_vs_control_init(void) pr_err("cannot register Generic Netlink interface.\n"); goto err_genl; } - - ret = register_netdevice_notifier(&ip_vs_dst_notifier); - if (ret < 0) - goto err_notf; - - LeaveFunction(2); return 0; -err_notf: - ip_vs_genl_unregister(); err_genl: nf_unregister_sockopt(&ip_vs_sockopts); err_sock: return ret; } +void ip_vs_unregister_nl_ioctl(void) +{ + ip_vs_genl_unregister(); + nf_unregister_sockopt(&ip_vs_sockopts); +} + +int __init ip_vs_control_init(void) +{ + int idx; + int ret; + + EnterFunction(2); + + /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */ + for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { + INIT_LIST_HEAD(&ip_vs_svc_table[idx]); + INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); + } + + smp_wmb(); /* Do we really need it now ? 
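The ip_vs_init() change above makes the netlink/ioctl registration the last step of module init and tears the netfilter hooks back down when it fails (the new cleanup_hooks label), with ip_vs_cleanup() unwinding in the reverse order starting from ip_vs_unregister_nl_ioctl(). A condensed sketch of that goto-unwind shape, using placeholder example_* symbols rather than the real IPVS ones:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/net_namespace.h>

static struct pernet_operations example_core_ops;	/* stand-ins, normally filled in */
static struct pernet_operations example_dev_ops;
static struct nf_hook_ops example_hooks[1];
static int example_register_nl_ioctl(void) { return 0; }

static int __init example_init(void)
{
	int ret;

	ret = register_pernet_subsys(&example_core_ops);
	if (ret < 0)
		goto err_subsys;
	ret = register_pernet_device(&example_dev_ops);
	if (ret < 0)
		goto err_device;
	ret = nf_register_hooks(example_hooks, ARRAY_SIZE(example_hooks));
	if (ret < 0)
		goto err_hooks;
	ret = example_register_nl_ioctl();	/* user-facing interface last */
	if (ret < 0)
		goto err_nl_ioctl;
	return 0;

err_nl_ioctl:
	nf_unregister_hooks(example_hooks, ARRAY_SIZE(example_hooks));
err_hooks:
	unregister_pernet_device(&example_dev_ops);
err_device:
	unregister_pernet_subsys(&example_core_ops);
err_subsys:
	return ret;
}

Each error label undoes exactly the registrations that succeeded before it, which is the same shape the reordered labels in init_sunrpc() further down in this patch restore.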
*/ + + ret = register_netdevice_notifier(&ip_vs_dst_notifier); + if (ret < 0) + return ret; + + LeaveFunction(2); + return 0; +} + void ip_vs_control_cleanup(void) { EnterFunction(2); unregister_netdevice_notifier(&ip_vs_dst_notifier); - ip_vs_genl_unregister(); - nf_unregister_sockopt(&ip_vs_sockopts); LeaveFunction(2); } diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 538d74ee4f6..e39f693dd3e 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net) struct ip_vs_app *app; struct netns_ipvs *ipvs = net_ipvs(net); + if (!ipvs) + return -ENOENT; app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL); if (!app) return -ENOMEM; diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 0f16283fd05..caa43704e55 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); + if (!ipvs) + return -ENOENT; + if (!net_eq(net, &init_net)) { ipvs->lblc_ctl_table = kmemdup(vs_vars_table, sizeof(vs_vars_table), diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index eec797f8cce..548bf37aa29 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); + if (!ipvs) + return -ENOENT; + if (!net_eq(net, &init_net)) { ipvs->lblcr_ctl_table = kmemdup(vs_vars_table, sizeof(vs_vars_table), diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index f843a883325..ed835e67a07 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -59,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) return 0; } -#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \ - defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \ - defined(CONFIG_IP_VS_PROTO_ESP) /* * register an ipvs protocols netns related data */ @@ -81,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) ipvs->proto_data_table[hash] = pd; atomic_set(&pd->appcnt, 0); /* Init app counter */ - if (pp->init_netns != NULL) - pp->init_netns(net, pd); + if (pp->init_netns != NULL) { + int ret = pp->init_netns(net, pd); + if (ret) { + /* unlink an free proto data */ + ipvs->proto_data_table[hash] = pd->next; + kfree(pd); + return ret; + } + } return 0; } -#endif /* * unregister an ipvs protocol @@ -316,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, */ int __net_init ip_vs_protocol_net_init(struct net *net) { + int i, ret; + static struct ip_vs_protocol *protos[] = { #ifdef CONFIG_IP_VS_PROTO_TCP - register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); + &ip_vs_protocol_tcp, #endif #ifdef CONFIG_IP_VS_PROTO_UDP - register_ip_vs_proto_netns(net, &ip_vs_protocol_udp); + &ip_vs_protocol_udp, #endif #ifdef CONFIG_IP_VS_PROTO_SCTP - register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp); + &ip_vs_protocol_sctp, #endif #ifdef CONFIG_IP_VS_PROTO_AH - register_ip_vs_proto_netns(net, &ip_vs_protocol_ah); + &ip_vs_protocol_ah, #endif #ifdef CONFIG_IP_VS_PROTO_ESP - register_ip_vs_proto_netns(net, &ip_vs_protocol_esp); + &ip_vs_protocol_esp, #endif + }; + + for (i = 0; i < ARRAY_SIZE(protos); i++) { + ret = 
register_ip_vs_proto_netns(net, protos[i]); + if (ret < 0) + goto cleanup; + } return 0; + +cleanup: + ip_vs_protocol_net_cleanup(net); + return ret; } void __net_exit ip_vs_protocol_net_cleanup(struct net *net) diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 1fbf7a2816f..9f3fb751c49 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -1090,7 +1090,7 @@ out: * timeouts is netns related now. * --------------------------------------------- */ -static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) +static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) spin_lock_init(&ipvs->sctp_app_lock); pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts, sizeof(sctp_timeouts)); + if (!pd->timeout_table) + return -ENOMEM; + return 0; } static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd) diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index ef8641f7af8..cd609cc6272 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp) * timeouts is netns related now. * --------------------------------------------- */ -static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) +static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) spin_lock_init(&ipvs->tcp_app_lock); pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, sizeof(tcp_timeouts)); + if (!pd->timeout_table) + return -ENOMEM; pd->tcp_state_table = tcp_states; + return 0; } static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd) diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index f4b7262896b..2fedb2dcb3d 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction, cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; } -static void __udp_init(struct net *net, struct ip_vs_proto_data *pd) +static int __udp_init(struct net *net, struct ip_vs_proto_data *pd) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd) spin_lock_init(&ipvs->udp_app_lock); pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts, sizeof(udp_timeouts)); + if (!pd->timeout_table) + return -ENOMEM; + return 0; } static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index cbdb754dbb1..729f157a0ef 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -735,6 +735,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone, #ifdef CONFIG_NF_CONNTRACK_ZONES out_free: + atomic_dec(&net->ct.count); kmem_cache_free(net->ct.nf_conntrack_cachep, ct); return ERR_PTR(-ENOMEM); #endif @@ -1591,7 +1592,7 @@ static int nf_conntrack_init_net(struct net *net) return 0; err_timeout: - nf_conntrack_timeout_fini(net); + nf_conntrack_ecache_fini(net); err_ecache: 
nf_conntrack_tstamp_fini(net); err_tstamp: diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 361eade62a0..0d07a1dcf60 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -584,8 +584,8 @@ static bool tcp_in_window(const struct nf_conn *ct, * Let's try to use the data from the packet. */ sender->td_end = end; - win <<= sender->td_scale; - sender->td_maxwin = (win == 0 ? 1 : win); + swin = win << sender->td_scale; + sender->td_maxwin = (swin == 0 ? 1 : swin); sender->td_maxend = end + sender->td_maxwin; /* * We haven't seen traffic in the other direction yet diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 3eb348bfc4f..d98c868c148 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> +#include <linux/atomic.h> #include <linux/netlink.h> #include <linux/rculist.h> #include <linux/slab.h> @@ -17,7 +18,6 @@ #include <linux/errno.h> #include <net/netlink.h> #include <net/sock.h> -#include <asm/atomic.h> #include <linux/netfilter.h> #include <linux/netfilter/nfnetlink.h> diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 0c8e43810ce..3746d8b9a47 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -150,6 +150,17 @@ err1: return ret; } +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT +static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout) +{ + typeof(nf_ct_timeout_put_hook) timeout_put; + + timeout_put = rcu_dereference(nf_ct_timeout_put_hook); + if (timeout_put) + timeout_put(timeout); +} +#endif + static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par) { struct xt_ct_target_info_v1 *info = par->targinfo; @@ -158,7 +169,9 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par) struct nf_conn *ct; int ret = 0; u8 proto; - +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT + struct ctnl_timeout *timeout; +#endif if (info->flags & ~XT_CT_NOTRACK) return -EINVAL; @@ -214,9 +227,8 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par) } #ifdef CONFIG_NF_CONNTRACK_TIMEOUT - if (info->timeout) { + if (info->timeout[0]) { typeof(nf_ct_timeout_find_get_hook) timeout_find_get; - struct ctnl_timeout *timeout; struct nf_conn_timeout *timeout_ext; rcu_read_lock(); @@ -245,7 +257,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par) pr_info("Timeout policy `%s' can only be " "used by L3 protocol number %d\n", info->timeout, timeout->l3num); - goto err4; + goto err5; } /* Make sure the timeout policy matches any existing * protocol tracker, otherwise default to generic. 
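In the xt_CT hunks around here the ctnl_timeout lookup runs under rcu_read_lock(), so the follow-up nf_ct_timeout_ext_add() allocation is switched from GFP_KERNEL to GFP_ATOMIC (sleeping is not allowed inside an RCU read-side section), and the new err5 path drops the timeout reference through the put hook when a later check fails. A tiny illustration of the allocation rule, with a made-up struct:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo { int val; };	/* hypothetical payload */

static struct foo *foo_alloc_under_rcu(void)
{
	struct foo *f;

	rcu_read_lock();
	/* GFP_KERNEL may sleep to reclaim memory, which is forbidden here;
	 * GFP_ATOMIC never sleeps, at the price of failing more readily. */
	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	rcu_read_unlock();
	return f;
}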
@@ -258,13 +270,13 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par) "used by L4 protocol number %d\n", info->timeout, timeout->l4proto->l4proto); - goto err4; + goto err5; } timeout_ext = nf_ct_timeout_ext_add(ct, timeout, - GFP_KERNEL); + GFP_ATOMIC); if (timeout_ext == NULL) { ret = -ENOMEM; - goto err4; + goto err5; } } else { ret = -ENOENT; @@ -281,8 +293,12 @@ out: info->ct = ct; return 0; +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT +err5: + __xt_ct_tg_timeout_put(timeout); err4: rcu_read_unlock(); +#endif err3: nf_conntrack_free(ct); err2: diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 32bb75324e7..faa48f70b7c 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -829,12 +829,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, return 0; } -int netlink_sendskb(struct sock *sk, struct sk_buff *skb) +static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = skb->len; skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, len); + return len; +} + +int netlink_sendskb(struct sock *sk, struct sk_buff *skb) +{ + int len = __netlink_sendskb(sk, skb); + sock_put(sk); return len; } @@ -957,8 +964,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && !test_bit(0, &nlk->state)) { skb_set_owner_r(skb, sk); - skb_queue_tail(&sk->sk_receive_queue, skb); - sk->sk_data_ready(sk, skb->len); + __netlink_sendskb(sk, skb); return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); } return -1; @@ -1698,10 +1704,8 @@ static int netlink_dump(struct sock *sk) if (sk_filter(sk, skb)) kfree_skb(skb); - else { - skb_queue_tail(&sk->sk_receive_queue, skb); - sk->sk_data_ready(sk, skb->len); - } + else + __netlink_sendskb(sk, skb); return 0; } @@ -1715,10 +1719,8 @@ static int netlink_dump(struct sock *sk) if (sk_filter(sk, skb)) kfree_skb(skb); - else { - skb_queue_tail(&sk->sk_receive_queue, skb); - sk->sk_data_ready(sk, skb->len); - } + else + __netlink_sendskb(sk, skb); if (cb->done) cb->done(cb); diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c index 7b76eb7192f..ef10ffcb4b6 100644 --- a/net/nfc/llcp/commands.c +++ b/net/nfc/llcp/commands.c @@ -474,7 +474,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, while (remaining_len > 0) { - frag_len = min_t(u16, local->remote_miu, remaining_len); + frag_len = min_t(size_t, local->remote_miu, remaining_len); pr_debug("Fragment %zd bytes remaining %zd", frag_len, remaining_len); @@ -497,7 +497,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, release_sock(sk); remaining_len -= frag_len; - msg_ptr += len; + msg_ptr += frag_len; } kfree(msg_data); diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e44e631ea95..e66341ec455 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -321,7 +321,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb, return -ENOMEM; nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); - if (!skb) + if (!nskb) return -ENOMEM; nskb->vlan_tci = 0; @@ -421,6 +421,19 @@ static int validate_sample(const struct nlattr *attr, return validate_actions(actions, key, depth + 1); } +static int validate_tp_port(const struct sw_flow_key *flow_key) +{ + if (flow_key->eth.type == htons(ETH_P_IP)) { + if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst) + return 0; + } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { + if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst) + return 0; + 
} + + return -EINVAL; +} + static int validate_set(const struct nlattr *a, const struct sw_flow_key *flow_key) { @@ -462,18 +475,13 @@ static int validate_set(const struct nlattr *a, if (flow_key->ip.proto != IPPROTO_TCP) return -EINVAL; - if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) - return -EINVAL; - - break; + return validate_tp_port(flow_key); case OVS_KEY_ATTR_UDP: if (flow_key->ip.proto != IPPROTO_UDP) return -EINVAL; - if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) - return -EINVAL; - break; + return validate_tp_port(flow_key); default: return -EINVAL; @@ -1641,10 +1649,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_NEW); if (IS_ERR(reply)) { - err = PTR_ERR(reply); netlink_set_err(init_net.genl_sock, 0, - ovs_dp_vport_multicast_group.id, err); - return 0; + ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); + goto exit_unlock; } genl_notify(reply, genl_info_net(info), info->snd_pid, diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 1252c3081ef..2a11ec2383e 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -183,7 +183,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) u8 tcp_flags = 0; if (flow->key.eth.type == htons(ETH_P_IP) && - flow->key.ip.proto == IPPROTO_TCP) { + flow->key.ip.proto == IPPROTO_TCP && + likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { u8 *tcp = (u8 *)tcp_hdr(skb); tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; } diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 9f60008740e..9726fe684ab 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -1130,6 +1130,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk, int flags = msg->msg_flags; int err, done; + if (len > USHRT_MAX) + return -EMSGSIZE; + if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL| MSG_CMSG_COMPAT)) || !(msg->msg_flags & MSG_EOR)) diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 9b9a85ecc4c..bf5cf69c820 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -331,23 +331,6 @@ static int __net_init phonet_init_net(struct net *net) static void __net_exit phonet_exit_net(struct net *net) { - struct phonet_net *pnn = phonet_pernet(net); - struct net_device *dev; - unsigned i; - - rtnl_lock(); - for_each_netdev(net, dev) - phonet_device_destroy(dev); - - for (i = 0; i < 64; i++) { - dev = pnn->routes.table[i]; - if (dev) { - rtm_phonet_notify(RTM_DELROUTE, dev, i); - dev_put(dev); - } - } - rtnl_unlock(); - proc_net_remove(net, "phonet"); } @@ -361,7 +344,7 @@ static struct pernet_operations phonet_net_ops = { /* Initialize Phonet devices list */ int __init phonet_device_init(void) { - int err = register_pernet_device(&phonet_net_ops); + int err = register_pernet_subsys(&phonet_net_ops); if (err) return err; @@ -377,7 +360,7 @@ void phonet_device_exit(void) { rtnl_unregister_all(PF_PHONET); unregister_netdevice_notifier(&phonet_device_notifier); - unregister_pernet_device(&phonet_net_ops); + unregister_pernet_subsys(&phonet_net_ops); proc_net_remove(&init_net, "pnresource"); } diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 1ab8689726e..906cc05bba6 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c @@ -95,11 +95,11 @@ static int rose_set_mac_address(struct net_device *dev, void *addr) struct sockaddr *sa = addr; int err; - if (!memcpy(dev->dev_addr, sa->sa_data, dev->addr_len)) + if (!memcmp(dev->dev_addr, sa->sa_data, 
dev->addr_len)) return 0; if (dev->flags & IFF_UP) { - err = rose_add_loopback_node((rose_address *)dev->dev_addr); + err = rose_add_loopback_node((rose_address *)sa->sa_data); if (err) return err; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 0b15236be7b..8179494c269 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -565,11 +565,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) opt.packets = q->packetsin; opt.bytesin = q->bytesin; - if (gred_wred_mode(table)) { - q->vars.qidlestart = - table->tab[table->def]->vars.qidlestart; - q->vars.qavg = table->tab[table->def]->vars.qavg; - } + if (gred_wred_mode(table)) + gred_load_wred_set(table, q); opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 5da548fa7ae..ebd22966f74 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -408,10 +408,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_help(skb))) { - sch->qstats.drops++; - return NET_XMIT_DROP; - } + skb_checksum_help(skb))) + return qdisc_drop(skb, sch); skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); } diff --git a/net/sctp/output.c b/net/sctp/output.c index 817174eb5f4..8fc4dcd294a 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) */ skb_set_owner_w(nskb, sk); - /* The 'obsolete' field of dst is set to 2 when a dst is freed. */ - if (!dst || (dst->obsolete > 1)) { - dst_release(dst); + if (!sctp_transport_dst_check(tp)) { sctp_transport_route(tp, NULL, sctp_sk(sk)); if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { sctp_assoc_sync_pmtu(asoc); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 06b42b7f5a0..92ba71dfe08 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4133,9 +4133,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, int __user *optlen) { - if (len < sizeof(struct sctp_event_subscribe)) + if (len <= 0) return -EINVAL; - len = sizeof(struct sctp_event_subscribe); + if (len > sizeof(struct sctp_event_subscribe)) + len = sizeof(struct sctp_event_subscribe); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 3889330b7b0..b026ba0c699 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } -/* this is a complete rip-off from __sk_dst_check - * the cookie is always 0 since this is how it's used in the - * pmtu code - */ -static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) -{ - struct dst_entry *dst = t->dst; - - if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) { - dst_release(t->dst); - t->dst = NULL; - return NULL; - } - - return dst; -} - void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) { struct dst_entry *dst; diff --git a/net/socket.c b/net/socket.c index 484cc6953fc..851edcd6b09 100644 --- a/net/socket.c +++ b/net/socket.c @@ -811,9 +811,9 @@ static ssize_t sock_sendpage(struct file *file, struct page *page, sock = file->private_data; - 
flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; - if (more) - flags |= MSG_MORE; + flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; + /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */ + flags |= more; return kernel_sendpage(sock, page, offset, size, flags); } diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index ca8cad8251c..782bfe1b646 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -242,12 +242,13 @@ EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor); int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr) { struct gss_api_mech *pos = NULL; - int i = 0; + int j, i = 0; spin_lock(®istered_mechs_lock); list_for_each_entry(pos, ®istered_mechs, gm_list) { - array_ptr[i] = pos->gm_pfs->pseudoflavor; - i++; + for (j=0; j < pos->gm_pf_num; j++) { + array_ptr[i++] = pos->gm_pfs[j].pseudoflavor; + } } spin_unlock(®istered_mechs_lock); return i; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 67972462a54..adf2990aceb 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -176,16 +176,22 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name) return 0; } -static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, - struct super_block *sb) +static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event) +{ + if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) || + ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry)) + return 1; + return 0; +} + +static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event, + struct super_block *sb) { struct dentry *dentry; int err = 0; switch (event) { case RPC_PIPEFS_MOUNT: - if (clnt->cl_program->pipe_dir_name == NULL) - break; dentry = rpc_setup_pipedir_sb(sb, clnt, clnt->cl_program->pipe_dir_name); BUG_ON(dentry == NULL); @@ -208,6 +214,20 @@ static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, return err; } +static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, + struct super_block *sb) +{ + int error = 0; + + for (;; clnt = clnt->cl_parent) { + if (!rpc_clnt_skip_event(clnt, event)) + error = __rpc_clnt_handle_event(clnt, event, sb); + if (error || clnt == clnt->cl_parent) + break; + } + return error; +} + static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); @@ -215,10 +235,12 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) spin_lock(&sn->rpc_client_lock); list_for_each_entry(clnt, &sn->all_clients, cl_clients) { - if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) || - ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry)) + if (clnt->cl_program->pipe_dir_name == NULL) + break; + if (rpc_clnt_skip_event(clnt, event)) + continue; + if (atomic_inc_not_zero(&clnt->cl_count) == 0) continue; - atomic_inc(&clnt->cl_count); spin_unlock(&sn->rpc_client_lock); return clnt; } @@ -257,6 +279,14 @@ void rpc_clients_notifier_unregister(void) return rpc_pipefs_notifier_unregister(&rpc_clients_block); } +static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename) +{ + clnt->cl_nodelen = strlen(nodename); + if (clnt->cl_nodelen > UNX_MAXNODENAME) + clnt->cl_nodelen = UNX_MAXNODENAME; + memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen); +} + static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt) { const struct rpc_program *program = args->program; @@ -337,10 
+367,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru } /* save the nodename */ - clnt->cl_nodelen = strlen(init_utsname()->nodename); - if (clnt->cl_nodelen > UNX_MAXNODENAME) - clnt->cl_nodelen = UNX_MAXNODENAME; - memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen); + rpc_clnt_set_nodename(clnt, utsname()->nodename); rpc_register_client(clnt); return clnt; @@ -499,6 +526,7 @@ rpc_clone_client(struct rpc_clnt *clnt) err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); if (err != 0) goto out_no_path; + rpc_clnt_set_nodename(new, utsname()->nodename); if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); atomic_inc(&clnt->cl_count); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 0af37fc4681..3b62cf28803 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1126,19 +1126,20 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) return -ENOMEM; dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net, NET_NAME(net)); + sn->pipefs_sb = sb; err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_MOUNT, sb); if (err) goto err_depopulate; sb->s_fs_info = get_net(net); - sn->pipefs_sb = sb; return 0; err_depopulate: blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_UMOUNT, sb); + sn->pipefs_sb = NULL; __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF); return err; } diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 8adfc88e793..3d6498af9ad 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -75,20 +75,21 @@ static struct pernet_operations sunrpc_net_ops = { static int __init init_sunrpc(void) { - int err = register_rpc_pipefs(); + int err = rpc_init_mempool(); if (err) goto out; - err = rpc_init_mempool(); - if (err) - goto out2; err = rpcauth_init_module(); if (err) - goto out3; + goto out2; cache_initialize(); err = register_pernet_subsys(&sunrpc_net_ops); if (err) + goto out3; + + err = register_rpc_pipefs(); + if (err) goto out4; #ifdef RPC_DEBUG rpc_register_sysctl(); @@ -98,11 +99,11 @@ init_sunrpc(void) return 0; out4: - rpcauth_remove_module(); + unregister_pernet_subsys(&sunrpc_net_ops); out3: - rpc_destroy_mempool(); + rpcauth_remove_module(); out2: - unregister_rpc_pipefs(); + rpc_destroy_mempool(); out: return err; } diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 39765bcfb47..920cabe0461 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -13,12 +13,6 @@ #include "core.h" #include "debugfs.h" -static int cfg80211_open_file_generic(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - #define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) 
\ static ssize_t name## _read(struct file *file, char __user *userbuf, \ size_t count, loff_t *ppos) \ @@ -33,7 +27,7 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \ \ static const struct file_operations name## _ops = { \ .read = name## _read, \ - .open = cfg80211_open_file_generic, \ + .open = simple_open, \ .llseek = generic_file_llseek, \ }; @@ -102,7 +96,7 @@ static ssize_t ht40allow_map_read(struct file *file, static const struct file_operations ht40allow_map_ops = { .read = ht40allow_map_read, - .open = cfg80211_open_file_generic, + .open = simple_open, .llseek = default_llseek, }; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4c1eb9472dd..f432c57af05 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1294,6 +1294,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) goto bad_res; } + if (!netif_running(netdev)) { + result = -ENETDOWN; + goto bad_res; + } + nla_for_each_nested(nl_txq_params, info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], rem_txq_params) { @@ -2386,7 +2391,9 @@ nla_put_failure: } static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, - int flags, struct net_device *dev, + int flags, + struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo) { void *hdr; @@ -2425,12 +2432,18 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, if (sinfo->filled & STATION_INFO_PLINK_STATE) NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE, sinfo->plink_state); - if (sinfo->filled & STATION_INFO_SIGNAL) - NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL, - sinfo->signal); - if (sinfo->filled & STATION_INFO_SIGNAL_AVG) - NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG, - sinfo->signal_avg); + switch (rdev->wiphy.signal_type) { + case CFG80211_SIGNAL_TYPE_MBM: + if (sinfo->filled & STATION_INFO_SIGNAL) + NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL, + sinfo->signal); + if (sinfo->filled & STATION_INFO_SIGNAL_AVG) + NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG, + sinfo->signal_avg); + break; + default: + break; + } if (sinfo->filled & STATION_INFO_TX_BITRATE) { if (!nl80211_put_sta_rate(msg, &sinfo->txrate, NL80211_STA_INFO_TX_BITRATE)) @@ -2523,7 +2536,7 @@ static int nl80211_dump_station(struct sk_buff *skb, if (nl80211_send_station(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - netdev, mac_addr, + dev, netdev, mac_addr, &sinfo) < 0) goto out; @@ -2568,7 +2581,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0, - dev, mac_addr, &sinfo) < 0) { + rdev, dev, mac_addr, &sinfo) < 0) { nlmsg_free(msg); return -ENOBUFS; } @@ -6376,7 +6389,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_get_key, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6408,7 +6421,7 @@ static struct genl_ops nl80211_ops[] = { .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .doit = nl80211_set_beacon, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6416,7 +6429,7 @@ static struct genl_ops nl80211_ops[] = { .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .doit = nl80211_start_ap, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ 
-6424,7 +6437,7 @@ static struct genl_ops nl80211_ops[] = { .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .doit = nl80211_stop_ap, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6440,7 +6453,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_set_station, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6456,7 +6469,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_del_station, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6489,7 +6502,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_del_mpath, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6497,7 +6510,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_set_bss, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6523,7 +6536,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_get_mesh_config, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6656,7 +6669,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_setdel_pmksa, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6664,7 +6677,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_setdel_pmksa, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6672,7 +6685,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_flush_pmksa, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -6832,7 +6845,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_probe_client, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, - .internal_flags = NL80211_FLAG_NEED_NETDEV | + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { @@ -7596,7 +7609,8 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, if (!msg) return; - if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) { + if (nl80211_send_station(msg, 0, 0, 0, + rdev, dev, mac_addr, sinfo) < 0) { nlmsg_free(msg); return; } diff --git a/net/wireless/util.c b/net/wireless/util.c index 1b7a08df933..957f2562161 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -989,7 +989,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, if (rdev->wiphy.software_iftypes & BIT(iftype)) continue; for (j = 0; j < c->n_limits; j++) { - if (!(limits[j].types & iftype)) + if (!(limits[j].types & BIT(iftype))) continue; if (limits[j].max < num[iftype]) goto cont; diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 
0af7f54e4f6..af648e08e61 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -780,8 +780,10 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, if (cmd == SIOCSIWENCODEEXT) { struct iw_encode_ext *ee = (void *) extra; - if (iwp->length < sizeof(*ee) + ee->key_len) - return -EFAULT; + if (iwp->length < sizeof(*ee) + ee->key_len) { + err = -EFAULT; + goto out; + } } }
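The closing wext-core.c hunk replaces the bare return -EFAULT with err = -EFAULT; goto out;, presumably so that ioctl_standard_iw_point() leaves through its common exit path and the previously-allocated extra buffer is released rather than leaked. The general single-exit cleanup shape, with placeholder names rather than the wext ones:

#include <linux/slab.h>

static int handle_request(size_t user_len, size_t min_len)
{
	char *extra;
	int err = 0;

	extra = kzalloc(user_len, GFP_KERNEL);
	if (!extra)
		return -ENOMEM;

	if (user_len < min_len) {
		err = -EFAULT;
		goto out;	/* a bare "return -EFAULT" here would leak extra */
	}

	/* ... use the buffer ... */

out:
	kfree(extra);
	return err;
}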