author    | Ingo Molnar <mingo@elte.hu> | 2008-09-05 17:53:05 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2008-09-05 17:53:05 +0200
commit    | 28c3cfd5fb998bd3683bebeebbba38baa2101cad (patch)
tree      | 3d325023e6dc56baa6c69fc59dd55bf37ef7967e /net
parent    | 04197c83b3e05546d1003cfa3ff43f1639c0057f (diff)
parent    | b380b0d4f7dffcc235c0facefa537d4655619101 (diff)
Merge branch 'linus' into x86/tracehook
Diffstat (limited to 'net')
76 files changed, 930 insertions(+), 665 deletions(-)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 4e59df5f8e0..1edfdf4c095 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -456,7 +456,7 @@ static void __exit bt_exit(void) subsys_initcall(bt_init); module_exit(bt_exit); -MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth Core ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 12bba6207a8..80ba30cf4b6 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -736,7 +736,7 @@ MODULE_PARM_DESC(compress_src, "Compress sources headers"); module_param(compress_dst, bool, 0644); MODULE_PARM_DESC(compress_dst, "Compress destination headers"); -MODULE_AUTHOR("David Libault <david.libault@inventel.fr>, Maxim Krasnyansky <maxk@qualcomm.com>"); +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth BNEP ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index c85bf8f678d..f4f6615cad9 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -3,8 +3,6 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <linux/platform_device.h> - #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> @@ -12,10 +10,164 @@ #undef BT_DBG #define BT_DBG(D...) #endif + +struct class *bt_class = NULL; +EXPORT_SYMBOL_GPL(bt_class); + static struct workqueue_struct *btaddconn; static struct workqueue_struct *btdelconn; -static inline char *typetostr(int type) +static inline char *link_typetostr(int type) +{ + switch (type) { + case ACL_LINK: + return "ACL"; + case SCO_LINK: + return "SCO"; + case ESCO_LINK: + return "eSCO"; + default: + return "UNKNOWN"; + } +} + +static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_conn *conn = dev_get_drvdata(dev); + return sprintf(buf, "%s\n", link_typetostr(conn->type)); +} + +static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_conn *conn = dev_get_drvdata(dev); + bdaddr_t bdaddr; + baswap(&bdaddr, &conn->dst); + return sprintf(buf, "%s\n", batostr(&bdaddr)); +} + +static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_conn *conn = dev_get_drvdata(dev); + + return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + conn->features[0], conn->features[1], + conn->features[2], conn->features[3], + conn->features[4], conn->features[5], + conn->features[6], conn->features[7]); +} + +#define LINK_ATTR(_name,_mode,_show,_store) \ +struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store) + +static LINK_ATTR(type, S_IRUGO, show_link_type, NULL); +static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); +static LINK_ATTR(features, S_IRUGO, show_link_features, NULL); + +static struct attribute *bt_link_attrs[] = { + &link_attr_type.attr, + &link_attr_address.attr, + &link_attr_features.attr, + NULL +}; + +static struct attribute_group bt_link_group = { + .attrs = bt_link_attrs, +}; + +static struct attribute_group *bt_link_groups[] = { + &bt_link_group, + NULL +}; + +static void bt_link_release(struct device *dev) +{ + void *data = dev_get_drvdata(dev); + kfree(data); +} + +static struct device_type bt_link = { + 
.name = "link", + .groups = bt_link_groups, + .release = bt_link_release, +}; + +static void add_conn(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, work); + + flush_workqueue(btdelconn); + + if (device_add(&conn->dev) < 0) { + BT_ERR("Failed to register connection device"); + return; + } +} + +void hci_conn_add_sysfs(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p", conn); + + conn->dev.type = &bt_link; + conn->dev.class = bt_class; + conn->dev.parent = &hdev->dev; + + snprintf(conn->dev.bus_id, BUS_ID_SIZE, "%s:%d", + hdev->name, conn->handle); + + dev_set_drvdata(&conn->dev, conn); + + device_initialize(&conn->dev); + + INIT_WORK(&conn->work, add_conn); + + queue_work(btaddconn, &conn->work); +} + +/* + * The rfcomm tty device will possibly retain even when conn + * is down, and sysfs doesn't support move zombie device, + * so we should move the device before conn device is destroyed. + */ +static int __match_tty(struct device *dev, void *data) +{ + return !strncmp(dev->bus_id, "rfcomm", 6); +} + +static void del_conn(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, work); + struct hci_dev *hdev = conn->hdev; + + while (1) { + struct device *dev; + + dev = device_find_child(&conn->dev, NULL, __match_tty); + if (!dev) + break; + device_move(dev, NULL); + put_device(dev); + } + + device_del(&conn->dev); + put_device(&conn->dev); + hci_dev_put(hdev); +} + +void hci_conn_del_sysfs(struct hci_conn *conn) +{ + BT_DBG("conn %p", conn); + + if (!device_is_registered(&conn->dev)) + return; + + INIT_WORK(&conn->work, del_conn); + + queue_work(btdelconn, &conn->work); +} + +static inline char *host_typetostr(int type) { switch (type) { case HCI_VIRTUAL: @@ -40,7 +192,7 @@ static inline char *typetostr(int type) static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", typetostr(hdev->type)); + return sprintf(buf, "%s\n", host_typetostr(hdev->type)); } static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) @@ -221,183 +373,62 @@ static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, show_sniff_min_interval, store_sniff_min_interval); -static struct device_attribute *bt_attrs[] = { - &dev_attr_type, - &dev_attr_name, - &dev_attr_class, - &dev_attr_address, - &dev_attr_features, - &dev_attr_manufacturer, - &dev_attr_hci_version, - &dev_attr_hci_revision, - &dev_attr_inquiry_cache, - &dev_attr_idle_timeout, - &dev_attr_sniff_max_interval, - &dev_attr_sniff_min_interval, +static struct attribute *bt_host_attrs[] = { + &dev_attr_type.attr, + &dev_attr_name.attr, + &dev_attr_class.attr, + &dev_attr_address.attr, + &dev_attr_features.attr, + &dev_attr_manufacturer.attr, + &dev_attr_hci_version.attr, + &dev_attr_hci_revision.attr, + &dev_attr_inquiry_cache.attr, + &dev_attr_idle_timeout.attr, + &dev_attr_sniff_max_interval.attr, + &dev_attr_sniff_min_interval.attr, NULL }; -static ssize_t show_conn_type(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct hci_conn *conn = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", conn->type == ACL_LINK ? 
"ACL" : "SCO"); -} - -static ssize_t show_conn_address(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct hci_conn *conn = dev_get_drvdata(dev); - bdaddr_t bdaddr; - baswap(&bdaddr, &conn->dst); - return sprintf(buf, "%s\n", batostr(&bdaddr)); -} - -static ssize_t show_conn_features(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct hci_conn *conn = dev_get_drvdata(dev); - - return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - conn->features[0], conn->features[1], - conn->features[2], conn->features[3], - conn->features[4], conn->features[5], - conn->features[6], conn->features[7]); -} - -#define CONN_ATTR(_name,_mode,_show,_store) \ -struct device_attribute conn_attr_##_name = __ATTR(_name,_mode,_show,_store) - -static CONN_ATTR(type, S_IRUGO, show_conn_type, NULL); -static CONN_ATTR(address, S_IRUGO, show_conn_address, NULL); -static CONN_ATTR(features, S_IRUGO, show_conn_features, NULL); - -static struct device_attribute *conn_attrs[] = { - &conn_attr_type, - &conn_attr_address, - &conn_attr_features, - NULL +static struct attribute_group bt_host_group = { + .attrs = bt_host_attrs, }; -struct class *bt_class = NULL; -EXPORT_SYMBOL_GPL(bt_class); - -static struct bus_type bt_bus = { - .name = "bluetooth", +static struct attribute_group *bt_host_groups[] = { + &bt_host_group, + NULL }; -static struct platform_device *bt_platform; - -static void bt_release(struct device *dev) +static void bt_host_release(struct device *dev) { void *data = dev_get_drvdata(dev); kfree(data); } -static void add_conn(struct work_struct *work) -{ - struct hci_conn *conn = container_of(work, struct hci_conn, work); - int i; - - flush_workqueue(btdelconn); - - if (device_add(&conn->dev) < 0) { - BT_ERR("Failed to register connection device"); - return; - } - - for (i = 0; conn_attrs[i]; i++) - if (device_create_file(&conn->dev, conn_attrs[i]) < 0) - BT_ERR("Failed to create connection attribute"); -} - -void hci_conn_add_sysfs(struct hci_conn *conn) -{ - struct hci_dev *hdev = conn->hdev; - - BT_DBG("conn %p", conn); - - conn->dev.bus = &bt_bus; - conn->dev.parent = &hdev->dev; - - conn->dev.release = bt_release; - - snprintf(conn->dev.bus_id, BUS_ID_SIZE, "%s:%d", - hdev->name, conn->handle); - - dev_set_drvdata(&conn->dev, conn); - - device_initialize(&conn->dev); - - INIT_WORK(&conn->work, add_conn); - - queue_work(btaddconn, &conn->work); -} - -/* - * The rfcomm tty device will possibly retain even when conn - * is down, and sysfs doesn't support move zombie device, - * so we should move the device before conn device is destroyed. 
- */ -static int __match_tty(struct device *dev, void *data) -{ - return !strncmp(dev->bus_id, "rfcomm", 6); -} - -static void del_conn(struct work_struct *work) -{ - struct hci_conn *conn = container_of(work, struct hci_conn, work); - struct hci_dev *hdev = conn->hdev; - - while (1) { - struct device *dev; - - dev = device_find_child(&conn->dev, NULL, __match_tty); - if (!dev) - break; - device_move(dev, NULL); - put_device(dev); - } - - device_del(&conn->dev); - put_device(&conn->dev); - hci_dev_put(hdev); -} - -void hci_conn_del_sysfs(struct hci_conn *conn) -{ - BT_DBG("conn %p", conn); - - if (!device_is_registered(&conn->dev)) - return; - - INIT_WORK(&conn->work, del_conn); - - queue_work(btdelconn, &conn->work); -} +static struct device_type bt_host = { + .name = "host", + .groups = bt_host_groups, + .release = bt_host_release, +}; int hci_register_sysfs(struct hci_dev *hdev) { struct device *dev = &hdev->dev; - unsigned int i; int err; BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); - dev->bus = &bt_bus; + dev->type = &bt_host; + dev->class = bt_class; dev->parent = hdev->parent; strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE); - dev->release = bt_release; - dev_set_drvdata(dev, hdev); err = device_register(dev); if (err < 0) return err; - for (i = 0; bt_attrs[i]; i++) - if (device_create_file(dev, bt_attrs[i]) < 0) - BT_ERR("Failed to create device attribute"); - return 0; } @@ -410,59 +441,30 @@ void hci_unregister_sysfs(struct hci_dev *hdev) int __init bt_sysfs_init(void) { - int err; - btaddconn = create_singlethread_workqueue("btaddconn"); - if (!btaddconn) { - err = -ENOMEM; - goto out; - } + if (!btaddconn) + return -ENOMEM; btdelconn = create_singlethread_workqueue("btdelconn"); if (!btdelconn) { - err = -ENOMEM; - goto out_del; - } - - bt_platform = platform_device_register_simple("bluetooth", -1, NULL, 0); - if (IS_ERR(bt_platform)) { - err = PTR_ERR(bt_platform); - goto out_platform; + destroy_workqueue(btaddconn); + return -ENOMEM; } - err = bus_register(&bt_bus); - if (err < 0) - goto out_bus; - bt_class = class_create(THIS_MODULE, "bluetooth"); if (IS_ERR(bt_class)) { - err = PTR_ERR(bt_class); - goto out_class; + destroy_workqueue(btdelconn); + destroy_workqueue(btaddconn); + return PTR_ERR(bt_class); } return 0; - -out_class: - bus_unregister(&bt_bus); -out_bus: - platform_device_unregister(bt_platform); -out_platform: - destroy_workqueue(btdelconn); -out_del: - destroy_workqueue(btaddconn); -out: - return err; } void bt_sysfs_cleanup(void) { destroy_workqueue(btaddconn); - destroy_workqueue(btdelconn); class_destroy(bt_class); - - bus_unregister(&bt_bus); - - platform_device_unregister(bt_platform); } diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index c1239852834..3396d5bdef1 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -2516,7 +2516,7 @@ EXPORT_SYMBOL(l2cap_load); module_init(l2cap_init); module_exit(l2cap_exit); -MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 6cfc7ba611b..ba537fae0a4 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -2115,7 +2115,7 @@ MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel"); module_param(l2cap_mtu, uint, 0644); MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP 
connection"); -MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 8cda4987486..a16011fedc1 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -1002,7 +1002,7 @@ module_exit(sco_exit); module_param(disable_esco, bool, 0644); MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation"); -MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 9b58d70b0e7..4f52c3d50eb 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -148,11 +148,16 @@ static int br_set_tx_csum(struct net_device *dev, u32 data) } static struct ethtool_ops br_ethtool_ops = { - .get_drvinfo = br_getinfo, - .get_link = ethtool_op_get_link, - .set_sg = br_set_sg, - .set_tx_csum = br_set_tx_csum, - .set_tso = br_set_tso, + .get_drvinfo = br_getinfo, + .get_link = ethtool_op_get_link, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = br_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = br_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = br_set_tso, + .get_ufo = ethtool_op_get_ufo, + .get_flags = ethtool_op_get_flags, }; void br_dev_setup(struct net_device *dev) diff --git a/net/core/datagram.c b/net/core/datagram.c index dd61dcad601..52f577a0f54 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -339,6 +339,93 @@ fault: return -EFAULT; } +/** + * skb_copy_datagram_from_iovec - Copy a datagram from an iovec. + * @skb: buffer to copy + * @offset: offset in the buffer to start copying to + * @from: io vector to copy to + * @len: amount of data to copy to buffer from iovec + * + * Returns 0 or -EFAULT. + * Note: the iovec is modified during the copy. + */ +int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, + struct iovec *from, int len) +{ + int start = skb_headlen(skb); + int i, copy = start - offset; + + /* Copy header. */ + if (copy > 0) { + if (copy > len) + copy = len; + if (memcpy_fromiovec(skb->data + offset, from, copy)) + goto fault; + if ((len -= copy) == 0) + return 0; + offset += copy; + } + + /* Copy paged appendix. Hmm... why does this look so complicated? 
*/ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + int end; + + WARN_ON(start > offset + len); + + end = start + skb_shinfo(skb)->frags[i].size; + if ((copy = end - offset) > 0) { + int err; + u8 *vaddr; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + struct page *page = frag->page; + + if (copy > len) + copy = len; + vaddr = kmap(page); + err = memcpy_fromiovec(vaddr + frag->page_offset + + offset - start, from, copy); + kunmap(page); + if (err) + goto fault; + + if (!(len -= copy)) + return 0; + offset += copy; + } + start = end; + } + + if (skb_shinfo(skb)->frag_list) { + struct sk_buff *list = skb_shinfo(skb)->frag_list; + + for (; list; list = list->next) { + int end; + + WARN_ON(start > offset + len); + + end = start + list->len; + if ((copy = end - offset) > 0) { + if (copy > len) + copy = len; + if (skb_copy_datagram_from_iovec(list, + offset - start, + from, copy)) + goto fault; + if ((len -= copy) == 0) + return 0; + offset += copy; + } + start = end; + } + } + if (!len) + return 0; + +fault: + return -EFAULT; +} +EXPORT_SYMBOL(skb_copy_datagram_from_iovec); + static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 __user *to, int len, __wsum *csump) diff --git a/net/core/dev.c b/net/core/dev.c index 600bb23c4c2..60c51f76588 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) } -void __netif_schedule(struct Qdisc *q) +static inline void __netif_reschedule(struct Qdisc *q) { - if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) { - struct softnet_data *sd; - unsigned long flags; + struct softnet_data *sd; + unsigned long flags; - local_irq_save(flags); - sd = &__get_cpu_var(softnet_data); - q->next_sched = sd->output_queue; - sd->output_queue = q; - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); - } + local_irq_save(flags); + sd = &__get_cpu_var(softnet_data); + q->next_sched = sd->output_queue; + sd->output_queue = q; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); +} + +void __netif_schedule(struct Qdisc *q) +{ + if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) + __netif_reschedule(q); } EXPORT_SYMBOL(__netif_schedule); @@ -1800,9 +1804,13 @@ gso: spin_lock(root_lock); - rc = qdisc_enqueue_root(skb, q); - qdisc_run(q); - + if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { + kfree_skb(skb); + rc = NET_XMIT_DROP; + } else { + rc = qdisc_enqueue_root(skb, q); + qdisc_run(q); + } spin_unlock(root_lock); goto out; @@ -1974,15 +1982,17 @@ static void net_tx_action(struct softirq_action *h) head = head->next_sched; - smp_mb__before_clear_bit(); - clear_bit(__QDISC_STATE_SCHED, &q->state); - root_lock = qdisc_lock(q); if (spin_trylock(root_lock)) { + smp_mb__before_clear_bit(); + clear_bit(__QDISC_STATE_SCHED, + &q->state); qdisc_run(q); spin_unlock(root_lock); } else { - __netif_schedule(q); + if (!test_bit(__QDISC_STATE_DEACTIVATED, + &q->state)) + __netif_reschedule(q); } } } @@ -2084,7 +2094,8 @@ static int ing_filter(struct sk_buff *skb) q = rxq->qdisc; if (q != &noop_qdisc) { spin_lock(qdisc_lock(q)); - result = qdisc_enqueue_root(skb, q); + if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) + result = qdisc_enqueue_root(skb, q); spin_unlock(qdisc_lock(q)); } diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index a89f32fa94f..57abe8266be 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -99,7 +99,7 @@ struct gen_estimator_head static struct 
gen_estimator_head elist[EST_MAX_INTERVAL+1]; -/* Protects against NULL dereference and RCU write-side */ +/* Protects against NULL dereference */ static DEFINE_RWLOCK(est_lock); static void est_timer(unsigned long arg) @@ -185,7 +185,6 @@ int gen_new_estimator(struct gnet_stats_basic *bstats, est->last_packets = bstats->packets; est->avpps = rate_est->pps<<10; - write_lock_bh(&est_lock); if (!elist[idx].timer.function) { INIT_LIST_HEAD(&elist[idx].list); setup_timer(&elist[idx].timer, est_timer, idx); @@ -195,7 +194,6 @@ int gen_new_estimator(struct gnet_stats_basic *bstats, mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx)); list_add_rcu(&est->list, &elist[idx].list); - write_unlock_bh(&est_lock); return 0; } @@ -214,6 +212,7 @@ static void __gen_kill_estimator(struct rcu_head *head) * Removes the rate estimator specified by &bstats and &rate_est * and deletes the timer. * + * NOTE: Called under rtnl_mutex */ void gen_kill_estimator(struct gnet_stats_basic *bstats, struct gnet_stats_rate_est *rate_est) @@ -227,17 +226,17 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats, if (!elist[idx].timer.function) continue; - write_lock_bh(&est_lock); list_for_each_entry_safe(e, n, &elist[idx].list, list) { if (e->rate_est != rate_est || e->bstats != bstats) continue; + write_lock_bh(&est_lock); e->bstats = NULL; + write_unlock_bh(&est_lock); list_del_rcu(&e->list); call_rcu(&e->e_rcu, __gen_kill_estimator); } - write_unlock_bh(&est_lock); } } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 84640172d65..ca1ccdf1ef7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2256,14 +2256,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) segs = nskb; tail = nskb; - nskb->dev = skb->dev; - skb_copy_queue_mapping(nskb, skb); - nskb->priority = skb->priority; - nskb->protocol = skb->protocol; - nskb->vlan_tci = skb->vlan_tci; - nskb->dst = dst_clone(skb->dst); - memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); - nskb->pkt_type = skb->pkt_type; + __copy_skb_header(nskb, skb); nskb->mac_len = skb->mac_len; skb_reserve(nskb, headroom); @@ -2274,6 +2267,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) skb_copy_from_linear_data(skb, skb_put(nskb, doffset), doffset); if (!sg) { + nskb->ip_summed = CHECKSUM_NONE; nskb->csum = skb_copy_and_csum_bits(skb, offset, skb_put(nskb, len), len, 0); @@ -2283,8 +2277,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) frag = skb_shinfo(nskb)->frags; k = 0; - nskb->ip_summed = CHECKSUM_PARTIAL; - nskb->csum = skb->csum; skb_copy_from_linear_data_offset(skb, offset, skb_put(nskb, hsize), hsize); diff --git a/net/dccp/input.c b/net/dccp/input.c index df2f110df94..803933ab396 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -411,12 +411,6 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, struct dccp_sock *dp = dccp_sk(sk); long tstamp = dccp_timestamp(); - /* Stop the REQUEST timer */ - inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); - WARN_ON(sk->sk_send_head == NULL); - __kfree_skb(sk->sk_send_head); - sk->sk_send_head = NULL; - if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, dp->dccps_awl, dp->dccps_awh)) { dccp_pr_debug("invalid ackno: S.AWL=%llu, " @@ -441,6 +435,12 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, DCCP_ACKVEC_STATE_RECEIVED)) goto out_invalid_packet; /* FIXME: change error code */ + /* Stop the REQUEST timer */ + inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); + WARN_ON(sk->sk_send_head == NULL); + kfree_skb(sk->sk_send_head); + 
sk->sk_send_head = NULL; + dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq; dccp_update_gsr(sk, dp->dccps_isr); /* diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 91d3d96805d..b12dae2b0b2 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1029,6 +1029,11 @@ skip: } } +static inline bool inetdev_valid_mtu(unsigned mtu) +{ + return mtu >= 68; +} + /* Called only under RTNL semaphore */ static int inetdev_event(struct notifier_block *this, unsigned long event, @@ -1048,6 +1053,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, IN_DEV_CONF_SET(in_dev, NOXFRM, 1); IN_DEV_CONF_SET(in_dev, NOPOLICY, 1); } + } else if (event == NETDEV_CHANGEMTU) { + /* Re-enabling IP */ + if (inetdev_valid_mtu(dev->mtu)) + in_dev = inetdev_init(dev); } goto out; } @@ -1058,7 +1067,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, dev->ip_ptr = NULL; break; case NETDEV_UP: - if (dev->mtu < 68) + if (!inetdev_valid_mtu(dev->mtu)) break; if (dev->flags & IFF_LOOPBACK) { struct in_ifaddr *ifa; @@ -1080,9 +1089,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, ip_mc_down(in_dev); break; case NETDEV_CHANGEMTU: - if (dev->mtu >= 68) + if (inetdev_valid_mtu(dev->mtu)) break; - /* MTU falled under 68, disable IP */ + /* disable IP when MTU is not enough */ case NETDEV_UNREGISTER: inetdev_destroy(in_dev); break; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 860558633b2..55c355e6323 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net) return net->ipv4.icmp_sk[smp_processor_id()]; } -static inline int icmp_xmit_lock(struct sock *sk) +static inline struct sock *icmp_xmit_lock(struct net *net) { + struct sock *sk; + local_bh_disable(); + sk = icmp_sk(net); + if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path signals a * dst_link_failure() for an outgoing ICMP packet. */ local_bh_enable(); - return 1; + return NULL; } - return 0; + return sk; } static inline void icmp_xmit_unlock(struct sock *sk) @@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) struct ipcm_cookie ipc; struct rtable *rt = skb->rtable; struct net *net = dev_net(rt->u.dst.dev); - struct sock *sk = icmp_sk(net); - struct inet_sock *inet = inet_sk(sk); + struct sock *sk; + struct inet_sock *inet; __be32 daddr; if (ip_options_echo(&icmp_param->replyopts, skb)) return; - if (icmp_xmit_lock(sk)) + sk = icmp_xmit_lock(net); + if (sk == NULL) return; + inet = inet_sk(sk); icmp_param->data.icmph.checksum = 0; @@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) if (!rt) goto out; net = dev_net(rt->u.dst.dev); - sk = icmp_sk(net); /* * Find the original header. It is expected to be valid, of course. 
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) } } - if (icmp_xmit_lock(sk)) + sk = icmp_xmit_lock(net); + if (sk == NULL) return; /* diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c index 49587a49722..462a22c9787 100644 --- a/net/ipv4/netfilter/ipt_addrtype.c +++ b/net/ipv4/netfilter/ipt_addrtype.c @@ -70,7 +70,7 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct net_device *in, (info->flags & IPT_ADDRTYPE_INVERT_SOURCE); if (ret && info->dest) ret &= match_type(dev, iph->daddr, info->dest) ^ - (info->flags & IPT_ADDRTYPE_INVERT_DEST); + !!(info->flags & IPT_ADDRTYPE_INVERT_DEST); return ret; } diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c index 91537f11273..6c4f11f5144 100644 --- a/net/ipv4/netfilter/nf_nat_proto_common.c +++ b/net/ipv4/netfilter/nf_nat_proto_common.c @@ -73,9 +73,13 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, range_size = ntohs(range->max.all) - min + 1; } - off = *rover; if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) - off = net_random(); + off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip, + maniptype == IP_NAT_MANIP_SRC + ? tuple->dst.u.all + : tuple->src.u.all); + else + off = *rover; for (i = 0; i < range_size; i++, off++) { *portptr = htons(min + off % range_size); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 16fc6f454a3..6ee5354c9aa 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2914,6 +2914,68 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table, return 0; } +static void rt_secret_reschedule(int old) +{ + struct net *net; + int new = ip_rt_secret_interval; + int diff = new - old; + + if (!diff) + return; + + rtnl_lock(); + for_each_net(net) { + int deleted = del_timer_sync(&net->ipv4.rt_secret_timer); + + if (!new) + continue; + + if (deleted) { + long time = net->ipv4.rt_secret_timer.expires - jiffies; + + if (time <= 0 || (time += diff) <= 0) + time = 0; + + net->ipv4.rt_secret_timer.expires = time; + } else + net->ipv4.rt_secret_timer.expires = new; + + net->ipv4.rt_secret_timer.expires += jiffies; + add_timer(&net->ipv4.rt_secret_timer); + } + rtnl_unlock(); +} + +static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write, + struct file *filp, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int old = ip_rt_secret_interval; + int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos); + + rt_secret_reschedule(old); + + return ret; +} + +static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table, + int __user *name, + int nlen, + void __user *oldval, + size_t __user *oldlenp, + void __user *newval, + size_t newlen) +{ + int old = ip_rt_secret_interval; + int ret = sysctl_jiffies(table, name, nlen, oldval, oldlenp, newval, + newlen); + + rt_secret_reschedule(old); + + return ret; +} + static ctl_table ipv4_route_table[] = { { .ctl_name = NET_IPV4_ROUTE_GC_THRESH, @@ -3048,20 +3110,29 @@ static ctl_table ipv4_route_table[] = { .data = &ip_rt_secret_interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies, + .proc_handler = &ipv4_sysctl_rt_secret_interval, + .strategy = &ipv4_sysctl_rt_secret_interval_strategy, }, { .ctl_name = 0 } }; -static __net_initdata struct ctl_path ipv4_route_path[] = { +static struct ctl_table empty[1]; + +static struct ctl_table ipv4_skeleton[] = +{ + { .procname = "route", .ctl_name = NET_IPV4_ROUTE, + .mode = 0555, .child = 
ipv4_route_table}, + { .procname = "neigh", .ctl_name = NET_IPV4_NEIGH, + .mode = 0555, .child = empty}, + { } +}; + +static __net_initdata struct ctl_path ipv4_path[] = { { .procname = "net", .ctl_name = CTL_NET, }, { .procname = "ipv4", .ctl_name = NET_IPV4, }, - { .procname = "route", .ctl_name = NET_IPV4_ROUTE, }, { }, }; - static struct ctl_table ipv4_route_flush_table[] = { { .ctl_name = NET_IPV4_ROUTE_FLUSH, @@ -3074,6 +3145,13 @@ static struct ctl_table ipv4_route_flush_table[] = { { .ctl_name = 0 }, }; +static __net_initdata struct ctl_path ipv4_route_path[] = { + { .procname = "net", .ctl_name = CTL_NET, }, + { .procname = "ipv4", .ctl_name = NET_IPV4, }, + { .procname = "route", .ctl_name = NET_IPV4_ROUTE, }, + { }, +}; + static __net_init int sysctl_route_net_init(struct net *net) { struct ctl_table *tbl; @@ -3126,10 +3204,12 @@ static __net_init int rt_secret_timer_init(struct net *net) net->ipv4.rt_secret_timer.data = (unsigned long)net; init_timer_deferrable(&net->ipv4.rt_secret_timer); - net->ipv4.rt_secret_timer.expires = - jiffies + net_random() % ip_rt_secret_interval + - ip_rt_secret_interval; - add_timer(&net->ipv4.rt_secret_timer); + if (ip_rt_secret_interval) { + net->ipv4.rt_secret_timer.expires = + jiffies + net_random() % ip_rt_secret_interval + + ip_rt_secret_interval; + add_timer(&net->ipv4.rt_secret_timer); + } return 0; } @@ -3223,7 +3303,7 @@ int __init ip_rt_init(void) */ void __init ip_static_sysctl_init(void) { - register_sysctl_paths(ipv4_route_path, ipv4_route_table); + register_sysctl_paths(ipv4_path, ipv4_skeleton); } #endif diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a00532de2a8..8165f5aa8c7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -468,7 +468,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, } if (likely(sysctl_tcp_window_scaling)) { opts->ws = tp->rx_opt.rcv_wscale; - size += TCPOLEN_WSCALE_ALIGNED; + if(likely(opts->ws)) + size += TCPOLEN_WSCALE_ALIGNED; } if (likely(sysctl_tcp_sack)) { opts->options |= OPTION_SACK_ADVERTISE; @@ -509,7 +510,8 @@ static unsigned tcp_synack_options(struct sock *sk, if (likely(ireq->wscale_ok)) { opts->ws = ireq->rcv_wscale; - size += TCPOLEN_WSCALE_ALIGNED; + if(likely(opts->ws)) + size += TCPOLEN_WSCALE_ALIGNED; } if (likely(doing_ts)) { opts->options |= OPTION_TS; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index a7842c54f58..7b6a584b62d 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1106,13 +1106,12 @@ out: return ret; } -int ipv6_dev_get_saddr(struct net_device *dst_dev, +int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, const struct in6_addr *daddr, unsigned int prefs, struct in6_addr *saddr) { struct ipv6_saddr_score scores[2], *score = &scores[0], *hiscore = &scores[1]; - struct net *net = dev_net(dst_dev); struct ipv6_saddr_dst dst; struct net_device *dev; int dst_type; @@ -1689,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev, .fc_dst_len = plen, .fc_flags = RTF_UP | flags, .fc_nlinfo.nl_net = dev_net(dev), + .fc_protocol = RTPROT_KERNEL, }; ipv6_addr_copy(&cfg.fc_dst, pfx); diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 8d05527524e..f5de3f9dc69 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -93,7 +93,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, if (flags & RT6_LOOKUP_F_SRCPREF_COA) srcprefs |= IPV6_PREFER_SRC_COA; - if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev, + if 
(ipv6_dev_get_saddr(net, + ip6_dst_idev(&rt->u.dst)->dev, &flp->fl6_dst, srcprefs, &saddr)) goto again; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index abedf95fdf2..b3157a0cc15 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = { .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; -static __inline__ int icmpv6_xmit_lock(struct sock *sk) +static __inline__ struct sock *icmpv6_xmit_lock(struct net *net) { + struct sock *sk; + local_bh_disable(); + sk = icmpv6_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path (f.e. SIT or * ip6ip6 tunnel) signals dst_link_failure() for an * outgoing ICMP6 packet. */ local_bh_enable(); - return 1; + return NULL; } - return 0; + return sk; } static __inline__ void icmpv6_xmit_unlock(struct sock *sk) @@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, fl.fl_icmp_code = code; security_skb_classify_flow(skb, &fl); - sk = icmpv6_sk(net); - np = inet6_sk(sk); - - if (icmpv6_xmit_lock(sk)) + sk = icmpv6_xmit_lock(net); + if (sk == NULL) return; + np = inet6_sk(sk); if (!icmpv6_xrlim_allow(sk, type, &fl)) goto out; @@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb) fl.fl_icmp_type = ICMPV6_ECHO_REPLY; security_skb_classify_flow(skb, &fl); - sk = icmpv6_sk(net); - np = inet6_sk(sk); - - if (icmpv6_xmit_lock(sk)) + sk = icmpv6_xmit_lock(net); + if (sk == NULL) return; + np = inet6_sk(sk); if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) fl.oif = np->mcast_oif; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 52dddc25d3e..29c7c99e69f 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -378,6 +378,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) arg.skb = skb; arg.cb = cb; + arg.net = net; w->args = &arg; for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a4402de425d..0e844c2736a 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -934,7 +934,7 @@ static int ip6_dst_lookup_tail(struct sock *sk, goto out_err_release; if (ipv6_addr_any(&fl->fl6_src)) { - err = ipv6_dev_get_saddr(ip6_dst_idev(*dst)->dev, + err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev, &fl->fl6_dst, sk ? inet6_sk(sk)->srcprefs : 0, &fl->fl6_src); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 741cfcd96f8..4e5eac301f9 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -911,7 +911,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, } else { if (np->rxopt.bits.rxinfo) { struct in6_pktinfo src_info; - src_info.ipi6_ifindex = np->mcast_oif; + src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if; ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr); put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); } @@ -921,7 +921,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, } if (np->rxopt.bits.rxoinfo) { struct in6_pktinfo src_info; - src_info.ipi6_ifindex = np->mcast_oif; + src_info.ipi6_ifindex = np->mcast_oif ? 
np->mcast_oif : sk->sk_bound_dev_if; ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr); put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index beb48e3f038..f1c62ba0f56 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -549,7 +549,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, override = 0; in6_ifa_put(ifp); } else { - if (ipv6_dev_get_saddr(dev, daddr, + if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr, inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs, &tmpaddr)) return; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 01d47674f7e..e53e493606c 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -377,14 +377,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) skb_checksum_complete(skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); - return 0; + return NET_RX_DROP; } /* Charge it to the socket. */ if (sock_queue_rcv_skb(sk,skb)<0) { atomic_inc(&sk->sk_drops); kfree_skb(skb); - return 0; + return NET_RX_DROP; } return 0; @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) if (skb_checksum_complete(skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); - return 0; + return NET_RX_DROP; } } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 41b165ffb36..9af6115f0f5 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2106,7 +2106,8 @@ static inline size_t rt6_nlmsg_size(void) + nla_total_size(sizeof(struct rta_cacheinfo)); } -static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, +static int rt6_fill_node(struct net *net, + struct sk_buff *skb, struct rt6_info *rt, struct in6_addr *dst, struct in6_addr *src, int iif, int type, u32 pid, u32 seq, int prefix, int nowait, unsigned int flags) @@ -2189,7 +2190,7 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, } else if (dst) { struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst); struct in6_addr saddr_buf; - if (ipv6_dev_get_saddr(idev ? idev->dev : NULL, + if (ipv6_dev_get_saddr(net, idev ? 
idev->dev : NULL, dst, 0, &saddr_buf) == 0) NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); } @@ -2234,7 +2235,8 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg) } else prefix = 0; - return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, + return rt6_fill_node(arg->net, + arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq, prefix, 0, NLM_F_MULTI); } @@ -2300,7 +2302,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); skb->dst = &rt->u.dst; - err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, + err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, RTM_NEWROUTE, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 0, 0, 0); if (err < 0) { @@ -2327,7 +2329,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) if (skb == NULL) goto errout; - err = rt6_fill_node(skb, rt, NULL, NULL, 0, + err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, event, info->pid, seq, 0, 0, 0); if (err < 0) { /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index e6dfaeac6be..587f8f60c48 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -156,7 +156,7 @@ static struct ctl_table_header *ip6_base; int ipv6_static_sysctl_register(void) { static struct ctl_table empty[1]; - ip6_base = register_net_sysctl_rotable(net_ipv6_ctl_path, empty); + ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty); if (ip6_base == NULL) return -ENOMEM; return 0; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 8f1e0543b3c..08e4cbbe3f0 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -52,12 +52,14 @@ static struct dst_entry *xfrm6_dst_lookup(int tos, xfrm_address_t *saddr, static int xfrm6_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr) { struct dst_entry *dst; + struct net_device *dev; dst = xfrm6_dst_lookup(0, NULL, daddr); if (IS_ERR(dst)) return -EHOSTUNREACH; - ipv6_dev_get_saddr(ip6_dst_idev(dst)->dev, + dev = ip6_dst_idev(dst)->dev; + ipv6_dev_get_saddr(dev_net(dev), dev, (struct in6_addr *)&daddr->a6, 0, (struct in6_addr *)&saddr->a6); dst_release(dst); diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 7439b63df5d..cf82acec913 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -265,7 +265,7 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) key = sdata->default_key; if (key) { sprintf(buf, "../keys/%d", key->debugfs.cnt); - sdata->debugfs.default_key = + sdata->common_debugfs.default_key = debugfs_create_symlink("default_key", sdata->debugfsdir, buf); } else @@ -277,8 +277,8 @@ void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata) if (!sdata) return; - debugfs_remove(sdata->debugfs.default_key); - sdata->debugfs.default_key = NULL; + debugfs_remove(sdata->common_debugfs.default_key); + sdata->common_debugfs.default_key = NULL; } void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 475f89a8aee..8165df578c9 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -248,8 +248,8 @@ IEEE80211_IF_WFILE(min_discovery_timeout, static void add_sta_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(drop_unencrypted, sta); - DEBUGFS_ADD(force_unicast_rateidx, ap); - DEBUGFS_ADD(max_ratectrl_rateidx, ap); + 
DEBUGFS_ADD(force_unicast_rateidx, sta); + DEBUGFS_ADD(max_ratectrl_rateidx, sta); DEBUGFS_ADD(state, sta); DEBUGFS_ADD(bssid, sta); @@ -283,8 +283,8 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata) static void add_wds_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(drop_unencrypted, wds); - DEBUGFS_ADD(force_unicast_rateidx, ap); - DEBUGFS_ADD(max_ratectrl_rateidx, ap); + DEBUGFS_ADD(force_unicast_rateidx, wds); + DEBUGFS_ADD(max_ratectrl_rateidx, wds); DEBUGFS_ADD(peer, wds); } @@ -292,8 +292,8 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata) static void add_vlan_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(drop_unencrypted, vlan); - DEBUGFS_ADD(force_unicast_rateidx, ap); - DEBUGFS_ADD(max_ratectrl_rateidx, ap); + DEBUGFS_ADD(force_unicast_rateidx, vlan); + DEBUGFS_ADD(max_ratectrl_rateidx, vlan); } static void add_monitor_files(struct ieee80211_sub_if_data *sdata) @@ -381,8 +381,8 @@ static void add_files(struct ieee80211_sub_if_data *sdata) static void del_sta_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_DEL(drop_unencrypted, sta); - DEBUGFS_DEL(force_unicast_rateidx, ap); - DEBUGFS_DEL(max_ratectrl_rateidx, ap); + DEBUGFS_DEL(force_unicast_rateidx, sta); + DEBUGFS_DEL(max_ratectrl_rateidx, sta); DEBUGFS_DEL(state, sta); DEBUGFS_DEL(bssid, sta); @@ -416,8 +416,8 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata) static void del_wds_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_DEL(drop_unencrypted, wds); - DEBUGFS_DEL(force_unicast_rateidx, ap); - DEBUGFS_DEL(max_ratectrl_rateidx, ap); + DEBUGFS_DEL(force_unicast_rateidx, wds); + DEBUGFS_DEL(max_ratectrl_rateidx, wds); DEBUGFS_DEL(peer, wds); } @@ -425,8 +425,8 @@ static void del_wds_files(struct ieee80211_sub_if_data *sdata) static void del_vlan_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_DEL(drop_unencrypted, vlan); - DEBUGFS_DEL(force_unicast_rateidx, ap); - DEBUGFS_DEL(max_ratectrl_rateidx, ap); + DEBUGFS_DEL(force_unicast_rateidx, vlan); + DEBUGFS_DEL(max_ratectrl_rateidx, vlan); } static void del_monitor_files(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ec59345af65..4498d871365 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -470,6 +470,8 @@ struct ieee80211_sub_if_data { struct dentry *auth_transaction; struct dentry *flags; struct dentry *num_beacons_sta; + struct dentry *force_unicast_rateidx; + struct dentry *max_ratectrl_rateidx; } sta; struct { struct dentry *drop_unencrypted; @@ -483,15 +485,21 @@ struct ieee80211_sub_if_data { struct { struct dentry *drop_unencrypted; struct dentry *peer; + struct dentry *force_unicast_rateidx; + struct dentry *max_ratectrl_rateidx; } wds; struct { struct dentry *drop_unencrypted; + struct dentry *force_unicast_rateidx; + struct dentry *max_ratectrl_rateidx; } vlan; struct { struct dentry *mode; } monitor; - struct dentry *default_key; } debugfs; + struct { + struct dentry *default_key; + } common_debugfs; #ifdef CONFIG_MAC80211_MESH struct dentry *mesh_stats_dir; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index b5933b27149..35f2f95f2fa 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -383,7 +383,7 @@ errcopy: hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) tbl->free_node(p, 0); } - __mesh_table_free(tbl); + __mesh_table_free(newtbl); endgrow: return NULL; } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index e1d11c9b672..9bb68c6a8f4 100644 --- a/net/mac80211/mlme.c 
+++ b/net/mac80211/mlme.c @@ -478,51 +478,21 @@ int ieee80211_ht_addt_info_ie_to_ht_bss_info( static void ieee80211_sta_send_associnfo(struct net_device *dev, struct ieee80211_if_sta *ifsta) { - char *buf; - size_t len; - int i; union iwreq_data wrqu; - if (!ifsta->assocreq_ies && !ifsta->assocresp_ies) - return; - - buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len + - ifsta->assocresp_ies_len), GFP_KERNEL); - if (!buf) - return; - - len = sprintf(buf, "ASSOCINFO("); if (ifsta->assocreq_ies) { - len += sprintf(buf + len, "ReqIEs="); - for (i = 0; i < ifsta->assocreq_ies_len; i++) { - len += sprintf(buf + len, "%02x", - ifsta->assocreq_ies[i]); - } + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = ifsta->assocreq_ies_len; + wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, + ifsta->assocreq_ies); } - if (ifsta->assocresp_ies) { - if (ifsta->assocreq_ies) - len += sprintf(buf + len, " "); - len += sprintf(buf + len, "RespIEs="); - for (i = 0; i < ifsta->assocresp_ies_len; i++) { - len += sprintf(buf + len, "%02x", - ifsta->assocresp_ies[i]); - } - } - len += sprintf(buf + len, ")"); - if (len > IW_CUSTOM_MAX) { - len = sprintf(buf, "ASSOCRESPIE="); - for (i = 0; i < ifsta->assocresp_ies_len; i++) { - len += sprintf(buf + len, "%02x", - ifsta->assocresp_ies[i]); - } + if (ifsta->assocresp_ies) { + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = ifsta->assocresp_ies_len; + wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, + ifsta->assocresp_ies); } - - memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = len; - wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); - - kfree(buf); } @@ -813,7 +783,7 @@ static void ieee80211_send_assoc(struct net_device *dev, } } - if (count == 8) { + if (rates_len > count) { pos = skb_put(skb, rates_len - count + 2); *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = rates_len - count; @@ -2103,6 +2073,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); return; } + /* update new sta with its last rx activity */ + sta->last_rx = jiffies; } /* @@ -2866,7 +2838,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev, jiffies); #endif /* CONFIG_MAC80211_IBSS_DEBUG */ if (beacon_timestamp > rx_timestamp) { -#ifndef CONFIG_MAC80211_IBSS_DEBUG +#ifdef CONFIG_MAC80211_IBSS_DEBUG printk(KERN_DEBUG "%s: beacon TSF higher than " "local TSF - IBSS merge with BSSID %s\n", dev->name, print_mac(mac, mgmt->bssid)); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 105a616c5c7..a8752031adc 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -968,7 +968,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[]) /* need to zero data of old helper */ memset(&help->help, 0, sizeof(help->help)); } else { - help = nf_ct_helper_ext_add(ct, GFP_KERNEL); + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); if (help == NULL) return -ENOMEM; } @@ -1136,16 +1136,33 @@ ctnetlink_create_conntrack(struct nlattr *cda[], ct->timeout.expires = jiffies + ct->timeout.expires * HZ; ct->status |= IPS_CONFIRMED; + rcu_read_lock(); + helper = __nf_ct_helper_find(rtuple); + if (helper) { + help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); + if (help == NULL) { + rcu_read_unlock(); + err = -ENOMEM; + goto err; + } + /* not in hash table yet so not strictly necessary */ + rcu_assign_pointer(help->helper, helper); + } + if (cda[CTA_STATUS]) { err = ctnetlink_change_status(ct, cda); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto err; + } } if 
(cda[CTA_PROTOINFO]) { err = ctnetlink_change_protoinfo(ct, cda); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto err; + } } nf_ct_acct_ext_add(ct, GFP_KERNEL); @@ -1155,19 +1172,6 @@ ctnetlink_create_conntrack(struct nlattr *cda[], ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); #endif - rcu_read_lock(); - helper = __nf_ct_helper_find(rtuple); - if (helper) { - help = nf_ct_helper_ext_add(ct, GFP_KERNEL); - if (help == NULL) { - rcu_read_unlock(); - err = -ENOMEM; - goto err; - } - /* not in hash table yet so not strictly necessary */ - rcu_assign_pointer(help->helper, helper); - } - /* setup master conntrack: this is a confirmed expectation */ if (master_ct) { __set_bit(IPS_EXPECTED_BIT, &ct->status); diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index d2d45655cd1..74aecc098ba 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c @@ -150,6 +150,8 @@ static void update_rfkill_state(struct rfkill *rfkill) * calls and handling all the red tape such as issuing notifications * if the call is successful. * + * Suspended devices are not touched at all, and -EAGAIN is returned. + * * Note that the @force parameter cannot override a (possibly cached) * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of * RFKILL_STATE_HARD_BLOCKED implements either get_state() or @@ -168,6 +170,9 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, int retval = 0; enum rfkill_state oldstate, newstate; + if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) + return -EBUSY; + oldstate = rfkill->state; if (rfkill->get_state && !force && @@ -214,7 +219,7 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, * * This function toggles the state of all switches of given type, * unless a specific switch is claimed by userspace (in which case, - * that switch is left alone). + * that switch is left alone) or suspended. */ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) { @@ -239,8 +244,8 @@ EXPORT_SYMBOL(rfkill_switch_all); /** * rfkill_epo - emergency power off all transmitters * - * This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring - * everything in its path but rfkill_mutex and rfkill->mutex. + * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, + * ignoring everything in its path but rfkill_mutex and rfkill->mutex. 
*/ void rfkill_epo(void) { @@ -372,7 +377,7 @@ static ssize_t rfkill_claim_show(struct device *dev, { struct rfkill *rfkill = to_rfkill(dev); - return sprintf(buf, "%d", rfkill->user_claim); + return sprintf(buf, "%d\n", rfkill->user_claim); } static ssize_t rfkill_claim_store(struct device *dev, @@ -458,13 +463,14 @@ static int rfkill_resume(struct device *dev) if (dev->power.power_state.event != PM_EVENT_ON) { mutex_lock(&rfkill->mutex); + dev->power.power_state.event = PM_EVENT_ON; + /* restore radio state AND notify everybody */ rfkill_toggle_radio(rfkill, rfkill->state, 1); mutex_unlock(&rfkill->mutex); } - dev->power.power_state = PMSG_ON; return 0; } #else diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index d2b6f54a626..8eb79e92e94 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -205,7 +205,7 @@ replay: } } - root_lock = qdisc_root_lock(q); + root_lock = qdisc_root_sleeping_lock(q); if (tp == NULL) { /* Proto-tcf does not exist, create new one */ @@ -280,7 +280,7 @@ replay: if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { spin_lock_bh(root_lock); *back = tp->next; - spin_lock_bh(root_lock); + spin_unlock_bh(root_lock); tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); tcf_destroy(tp); diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 481260a4f10..e3d8455eebc 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -75,7 +75,7 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif) static inline void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) { - spinlock_t *root_lock = qdisc_root_lock(q); + spinlock_t *root_lock = qdisc_root_sleeping_lock(q); spin_lock_bh(root_lock); memset(head->fastmap, 0, sizeof(head->fastmap)); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index c25465e5607..1122c952aa9 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -27,6 +27,7 @@ #include <linux/kmod.h> #include <linux/list.h> #include <linux/hrtimer.h> +#include <linux/lockdep.h> #include <net/net_namespace.h> #include <net/sock.h> @@ -198,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) return NULL; } +/* + * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen() + * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue() + */ +static DEFINE_SPINLOCK(qdisc_list_lock); + +static void qdisc_list_add(struct Qdisc *q) +{ + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { + spin_lock_bh(&qdisc_list_lock); + list_add_tail(&q->list, &qdisc_root_sleeping(q)->list); + spin_unlock_bh(&qdisc_list_lock); + } +} + +void qdisc_list_del(struct Qdisc *q) +{ + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { + spin_lock_bh(&qdisc_list_lock); + list_del(&q->list); + spin_unlock_bh(&qdisc_list_lock); + } +} +EXPORT_SYMBOL(qdisc_list_del); + struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) { unsigned int i; + struct Qdisc *q; + + spin_lock_bh(&qdisc_list_lock); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - struct Qdisc *q, *txq_root = txq->qdisc_sleeping; + struct Qdisc *txq_root = txq->qdisc_sleeping; q = qdisc_match_from_root(txq_root, handle); if (q) - return q; + goto unlock; } - return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); + + q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); + +unlock: + spin_unlock_bh(&qdisc_list_lock); + + return q; } static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) @@ 
-331,7 +366,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) if (!s || tsize != s->tsize || (!tab && tsize > 0)) return ERR_PTR(-EINVAL); - spin_lock_bh(&qdisc_stab_lock); + spin_lock(&qdisc_stab_lock); list_for_each_entry(stab, &qdisc_stab_list, list) { if (memcmp(&stab->szopts, s, sizeof(*s))) @@ -339,11 +374,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16))) continue; stab->refcnt++; - spin_unlock_bh(&qdisc_stab_lock); + spin_unlock(&qdisc_stab_lock); return stab; } - spin_unlock_bh(&qdisc_stab_lock); + spin_unlock(&qdisc_stab_lock); stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); if (!stab) @@ -354,9 +389,9 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) if (tsize > 0) memcpy(stab->data, tab, tsize * sizeof(u16)); - spin_lock_bh(&qdisc_stab_lock); + spin_lock(&qdisc_stab_lock); list_add_tail(&stab->list, &qdisc_stab_list); - spin_unlock_bh(&qdisc_stab_lock); + spin_unlock(&qdisc_stab_lock); return stab; } @@ -366,14 +401,14 @@ void qdisc_put_stab(struct qdisc_size_table *tab) if (!tab) return; - spin_lock_bh(&qdisc_stab_lock); + spin_lock(&qdisc_stab_lock); if (--tab->refcnt == 0) { list_del(&tab->list); kfree(tab); } - spin_unlock_bh(&qdisc_stab_lock); + spin_unlock(&qdisc_stab_lock); } EXPORT_SYMBOL(qdisc_put_stab); @@ -426,7 +461,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) wd->qdisc->flags &= ~TCQ_F_THROTTLED; smp_wmb(); - __netif_schedule(wd->qdisc); + __netif_schedule(qdisc_root(wd->qdisc)); return HRTIMER_NORESTART; } @@ -443,6 +478,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) { ktime_t time; + if (test_bit(__QDISC_STATE_DEACTIVATED, + &qdisc_root_sleeping(wd->qdisc)->state)) + return; + wd->qdisc->flags |= TCQ_F_THROTTLED; time = ktime_set(0, 0); time = ktime_add_ns(time, PSCHED_US2NS(expires)); @@ -585,7 +624,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; spinlock_t *root_lock; - root_lock = qdisc_root_lock(oqdisc); + root_lock = qdisc_lock(oqdisc); spin_lock_bh(root_lock); /* Prune old scheduler */ @@ -596,7 +635,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, if (qdisc == NULL) qdisc = &noop_qdisc; dev_queue->qdisc_sleeping = qdisc; - dev_queue->qdisc = &noop_qdisc; + rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); spin_unlock_bh(root_lock); @@ -637,11 +676,8 @@ static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid if (new || old) qdisc_notify(skb, n, clid, old, new); - if (old) { - spin_lock_bh(&old->q.lock); + if (old) qdisc_destroy(old); - spin_unlock_bh(&old->q.lock); - } } /* Graft qdisc "new" to class "classid" of qdisc "parent" or @@ -707,6 +743,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, return err; } +/* lockdep annotation is needed for ingress; egress gets it only for name */ +static struct lock_class_key qdisc_tx_lock; +static struct lock_class_key qdisc_rx_lock; + /* Allocate and initialize new qdisc. 
@@ -767,6 +807,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, if (handle == TC_H_INGRESS) { sch->flags |= TCQ_F_INGRESS; handle = TC_H_MAKE(TC_H_INGRESS, 0); + lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock); } else { if (handle == 0) { handle = qdisc_alloc_handle(dev); @@ -774,6 +815,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, if (handle == 0) goto err_out3; } + lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); } sch->handle = handle; @@ -788,9 +830,16 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, sch->stab = stab; } if (tca[TCA_RATE]) { + spinlock_t *root_lock; + + if ((sch->parent != TC_H_ROOT) && + !(sch->flags & TCQ_F_INGRESS)) + root_lock = qdisc_root_sleeping_lock(sch); + else + root_lock = qdisc_lock(sch); + err = gen_new_estimator(&sch->bstats, &sch->rate_est, - qdisc_root_lock(sch), - tca[TCA_RATE]); + root_lock, tca[TCA_RATE]); if (err) { /* * Any broken qdiscs that would require @@ -802,8 +851,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, goto err_out3; } } - if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS)) - list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list); + + qdisc_list_add(sch); return sch; } @@ -842,7 +891,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) if (tca[TCA_RATE]) gen_replace_estimator(&sch->bstats, &sch->rate_est, - qdisc_root_lock(sch), tca[TCA_RATE]); + qdisc_root_sleeping_lock(sch), + tca[TCA_RATE]); return 0; } @@ -1084,20 +1134,13 @@ create_n_graft: } graft: - if (1) { - spinlock_t *root_lock; - - err = qdisc_graft(dev, p, skb, n, clid, q, NULL); - if (err) { - if (q) { - root_lock = qdisc_root_lock(q); - spin_lock_bh(root_lock); - qdisc_destroy(q); - spin_unlock_bh(root_lock); - } - return err; - } + err = qdisc_graft(dev, p, skb, n, clid, q, NULL); + if (err) { + if (q) + qdisc_destroy(q); + return err; } + return 0; } @@ -1126,8 +1169,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, if (q->stab && qdisc_dump_stab(skb, q->stab) < 0) goto nla_put_failure; - if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, - TCA_XSTATS, qdisc_root_lock(q), &d) < 0) + if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, + qdisc_root_sleeping_lock(q), &d) < 0) goto nla_put_failure; if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) @@ -1418,8 +1461,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) goto nla_put_failure; - if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, - TCA_XSTATS, qdisc_root_lock(q), &d) < 0) + if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, + qdisc_root_sleeping_lock(q), &d) < 0) goto nla_put_failure; if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 4e261ce62f4..8b06fa90048 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -521,6 +521,10 @@ static void cbq_ovl_delay(struct cbq_class *cl) struct cbq_sched_data *q = qdisc_priv(cl->qdisc); psched_tdiff_t delay = cl->undertime - q->now; + if (test_bit(__QDISC_STATE_DEACTIVATED, + &qdisc_root_sleeping(cl->qdisc)->state)) + return; + if (!cl->delayed) { psched_time_t sched = q->now; ktime_t expires; @@ -654,7 +658,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) } sch->flags &= ~TCQ_F_THROTTLED; - __netif_schedule(sch); + __netif_schedule(qdisc_root(sch)); return HRTIMER_NORESTART; 
} @@ -1750,7 +1754,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg) if (--cl->refcnt == 0) { #ifdef CONFIG_NET_CLS_ACT - spinlock_t *root_lock = qdisc_root_lock(sch); + spinlock_t *root_lock = qdisc_root_sleeping_lock(sch); struct cbq_sched_data *q = qdisc_priv(sch); spin_lock_bh(root_lock); @@ -1835,7 +1839,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t if (tca[TCA_RATE]) gen_replace_estimator(&cl->bstats, &cl->rate_est, - qdisc_root_lock(sch), + qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); return 0; } @@ -1926,7 +1930,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t if (tca[TCA_RATE]) gen_new_estimator(&cl->bstats, &cl->rate_est, - qdisc_root_lock(sch), tca[TCA_RATE]); + qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); *arg = (unsigned long)cl; return 0; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 468574682ca..9634091ee2f 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -518,15 +518,17 @@ void qdisc_reset(struct Qdisc *qdisc) } EXPORT_SYMBOL(qdisc_reset); -/* this is the rcu callback function to clean up a qdisc when there - * are no further references to it */ - -static void __qdisc_destroy(struct rcu_head *head) +void qdisc_destroy(struct Qdisc *qdisc) { - struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu); const struct Qdisc_ops *ops = qdisc->ops; + if (qdisc->flags & TCQ_F_BUILTIN || + !atomic_dec_and_test(&qdisc->refcnt)) + return; + #ifdef CONFIG_NET_SCHED + qdisc_list_del(qdisc); + qdisc_put_stab(qdisc->stab); #endif gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est); @@ -542,20 +544,6 @@ static void __qdisc_destroy(struct rcu_head *head) kfree((char *) qdisc - qdisc->padded); } - -/* Under qdisc_lock(qdisc) and BH! 
*/ - -void qdisc_destroy(struct Qdisc *qdisc) -{ - if (qdisc->flags & TCQ_F_BUILTIN || - !atomic_dec_and_test(&qdisc->refcnt)) - return; - - if (qdisc->parent) - list_del(&qdisc->list); - - call_rcu(&qdisc->q_rcu, __qdisc_destroy); -} EXPORT_SYMBOL(qdisc_destroy); static bool dev_all_qdisc_sleeping_noop(struct net_device *dev) @@ -597,6 +585,9 @@ static void transition_one_qdisc(struct net_device *dev, struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; int *need_watchdog_p = _need_watchdog; + if (!(new_qdisc->flags & TCQ_F_BUILTIN)) + clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); + rcu_assign_pointer(dev_queue->qdisc, new_qdisc); if (need_watchdog_p && new_qdisc != &noqueue_qdisc) *need_watchdog_p = 1; @@ -640,14 +631,17 @@ static void dev_deactivate_queue(struct net_device *dev, if (qdisc) { spin_lock_bh(qdisc_lock(qdisc)); - dev_queue->qdisc = qdisc_default; + if (!(qdisc->flags & TCQ_F_BUILTIN)) + set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); + + rcu_assign_pointer(dev_queue->qdisc, qdisc_default); qdisc_reset(qdisc); spin_unlock_bh(qdisc_lock(qdisc)); } } -static bool some_qdisc_is_busy(struct net_device *dev, int lock) +static bool some_qdisc_is_busy(struct net_device *dev) { unsigned int i; @@ -661,14 +655,12 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock) q = dev_queue->qdisc_sleeping; root_lock = qdisc_lock(q); - if (lock) - spin_lock_bh(root_lock); + spin_lock_bh(root_lock); val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || test_bit(__QDISC_STATE_SCHED, &q->state)); - if (lock) - spin_unlock_bh(root_lock); + spin_unlock_bh(root_lock); if (val) return true; @@ -678,8 +670,6 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock) void dev_deactivate(struct net_device *dev) { - bool running; - netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc); dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc); @@ -689,25 +679,8 @@ void dev_deactivate(struct net_device *dev) synchronize_rcu(); /* Wait for outstanding qdisc_run calls. */ - do { - while (some_qdisc_is_busy(dev, 0)) - yield(); - - /* - * Double-check inside queue lock to ensure that all effects - * of the queue run are visible when we return. - */ - running = some_qdisc_is_busy(dev, 1); - - /* - * The running flag should never be set at this point because - * we've already set dev->qdisc to noop_qdisc *inside* the same - * pair of spin locks. That is, if any qdisc_run starts after - * our initial test it should see the noop_qdisc and then - * clear the RUNNING bit before dropping the queue lock. So - * if it is set here then we've found a bug. 
- */ - } while (WARN_ON_ONCE(running)); + while (some_qdisc_is_busy(dev)) + yield(); } static void dev_init_scheduler_queue(struct net_device *dev, @@ -736,14 +709,10 @@ static void shutdown_scheduler_queue(struct net_device *dev, struct Qdisc *qdisc_default = _qdisc_default; if (qdisc) { - spinlock_t *root_lock = qdisc_lock(qdisc); - - dev_queue->qdisc = qdisc_default; + rcu_assign_pointer(dev_queue->qdisc, qdisc_default); dev_queue->qdisc_sleeping = qdisc_default; - spin_lock_bh(root_lock); qdisc_destroy(qdisc); - spin_unlock_bh(root_lock); } } diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index c2b8d9cce3d..c1e77da8cd0 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (tca[TCA_RATE]) gen_replace_estimator(&cl->bstats, &cl->rate_est, - qdisc_root_lock(sch), + qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); return 0; } @@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (tca[TCA_RATE]) gen_new_estimator(&cl->bstats, &cl->rate_est, - qdisc_root_lock(sch), tca[TCA_RATE]); + qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); *arg = (unsigned long)cl; return 0; } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 6febd245e62..d14f02056ae 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -577,7 +577,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) sch->qstats.drops++; cl->qstats.drops++; } - return NET_XMIT_DROP; + return ret; } else { cl->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; @@ -623,7 +623,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch) sch->qstats.drops++; cl->qstats.drops++; } - return NET_XMIT_DROP; + return ret; } else htb_activate(q, cl); @@ -1043,7 +1043,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) { - spinlock_t *root_lock = qdisc_root_lock(sch); + spinlock_t *root_lock = qdisc_root_sleeping_lock(sch); struct htb_sched *q = qdisc_priv(sch); struct nlattr *nest; struct tc_htb_glob gopt; @@ -1075,7 +1075,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, struct tcmsg *tcm) { struct htb_class *cl = (struct htb_class *)arg; - spinlock_t *root_lock = qdisc_root_lock(sch); + spinlock_t *root_lock = qdisc_root_sleeping_lock(sch); struct nlattr *nest; struct tc_htb_opt opt; @@ -1372,7 +1372,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, goto failure; gen_new_estimator(&cl->bstats, &cl->rate_est, - qdisc_root_lock(sch), + qdisc_root_sleeping_lock(sch), tca[TCA_RATE] ? 
: &est.nla); cl->refcnt = 1; cl->children = 0; @@ -1427,7 +1427,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, } else { if (tca[TCA_RATE]) gen_replace_estimator(&cl->bstats, &cl->rate_est, - qdisc_root_lock(sch), + qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); sch_tree_lock(sch); } diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index fb0294d0b55..3781e55046d 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -341,7 +341,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) for (i = 0; i < n; i++) d->table[i] = data[i]; - root_lock = qdisc_root_lock(sch); + root_lock = qdisc_root_sleeping_lock(sch); spin_lock_bh(root_lock); d = xchg(&q->delay_dist, d); diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index eac197610ed..a6697c686c7 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -113,11 +113,11 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch) if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) { sch->q.qlen++; sch->qstats.requeues++; - return 0; + return NET_XMIT_SUCCESS; } if (net_xmit_drop_count(ret)) sch->qstats.drops++; - return NET_XMIT_DROP; + return ret; } diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 7d3b7ff3bf0..94c61598b86 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -123,15 +123,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) struct tbf_sched_data *q = qdisc_priv(sch); int ret; - if (qdisc_pkt_len(skb) > q->max_size) { - sch->qstats.drops++; -#ifdef CONFIG_NET_CLS_ACT - if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) -#endif - kfree_skb(skb); - - return NET_XMIT_DROP; - } + if (qdisc_pkt_len(skb) > q->max_size) + return qdisc_reshape_fail(skb, sch); ret = qdisc_enqueue(skb, q->qdisc); if (ret != 0) { diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 2c35c678563..d35ef059abb 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -161,7 +161,7 @@ teql_destroy(struct Qdisc* sch) txq = netdev_get_tx_queue(master->dev, 0); master->slaves = NULL; - root_lock = qdisc_root_lock(txq->qdisc); + root_lock = qdisc_root_sleeping_lock(txq->qdisc); spin_lock_bh(root_lock); qdisc_reset(txq->qdisc); spin_unlock_bh(root_lock); diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 675a5c3e68a..52db5f60daa 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -80,6 +80,10 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp) { struct sctp_auth_bytes *key; + /* Verify that we are not going to overflow INT_MAX */ + if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes)) + return NULL; + /* Allocate the shared key */ key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp); if (!key) @@ -782,6 +786,9 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep, for (i = 0; i < hmacs->shmac_num_idents; i++) { id = hmacs->shmac_idents[i]; + if (id > SCTP_AUTH_HMAC_ID_MAX) + return -EOPNOTSUPP; + if (SCTP_AUTH_HMAC_ID_SHA1 == id) has_sha1 = 1; diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index e39a0cdef18..4c8d9f45ce0 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, /* Initialize the CHUNKS parameter */ auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS; + auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t)); /* If the Add-IP functionality is enabled, we must * authenticate, ASCONF and ASCONF-ACK chunks @@ -110,8 +111,7 @@ static struct sctp_endpoint 
*sctp_endpoint_init(struct sctp_endpoint *ep, if (sctp_addip_enable) { auth_chunks->chunks[0] = SCTP_CID_ASCONF; auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; - auth_chunks->param_hdr.length = - htons(sizeof(sctp_paramhdr_t) + 2); + auth_chunks->param_hdr.length += htons(2); } } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 483a01d0740..47f91afa021 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -319,7 +319,8 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk, __func__, asoc, dst, NIP6(daddr->v6.sin6_addr)); if (!asoc) { - ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, + ipv6_dev_get_saddr(sock_net(sctp_opt2sk(sk)), + dst ? ip6_dst_idev(dst)->dev : NULL, &daddr->v6.sin6_addr, inet6_sk(&sk->inet.sk)->srcprefs, &saddr->v6.sin6_addr); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index dbb79adf8f3..5ffb9dec1c3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3055,6 +3055,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk, { struct sctp_authchunk val; + if (!sctp_auth_enable) + return -EACCES; + if (optlen != sizeof(struct sctp_authchunk)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) @@ -3083,8 +3086,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk, int optlen) { struct sctp_hmacalgo *hmacs; + u32 idents; int err; + if (!sctp_auth_enable) + return -EACCES; + if (optlen < sizeof(struct sctp_hmacalgo)) return -EINVAL; @@ -3097,8 +3104,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk, goto out; } - if (hmacs->shmac_num_idents == 0 || - hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) { + idents = hmacs->shmac_num_idents; + if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || + (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { err = -EINVAL; goto out; } @@ -3123,6 +3131,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk, struct sctp_association *asoc; int ret; + if (!sctp_auth_enable) + return -EACCES; + if (optlen <= sizeof(struct sctp_authkey)) return -EINVAL; @@ -3135,6 +3146,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk, goto out; } + if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { + ret = -EINVAL; + goto out; + } + asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { ret = -EINVAL; @@ -3160,6 +3176,9 @@ static int sctp_setsockopt_active_key(struct sock *sk, struct sctp_authkeyid val; struct sctp_association *asoc; + if (!sctp_auth_enable) + return -EACCES; + if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) @@ -3185,6 +3204,9 @@ static int sctp_setsockopt_del_key(struct sock *sk, struct sctp_authkeyid val; struct sctp_association *asoc; + if (!sctp_auth_enable) + return -EACCES; + if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) @@ -5197,19 +5219,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len, static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, char __user *optval, int __user *optlen) { + struct sctp_hmacalgo __user *p = (void __user *)optval; struct sctp_hmac_algo_param *hmacs; - __u16 param_len; + __u16 data_len = 0; + u32 num_idents; + + if (!sctp_auth_enable) + return -EACCES; hmacs = sctp_sk(sk)->ep->auth_hmacs_list; - param_len = ntohs(hmacs->param_hdr.length); + data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); - if (len < param_len) + if (len < sizeof(struct sctp_hmacalgo) + data_len) return -EINVAL; + + len = sizeof(struct sctp_hmacalgo) + 
data_len; + num_idents = data_len / sizeof(u16); + if (put_user(len, optlen)) return -EFAULT; - if (copy_to_user(optval, hmacs->hmac_ids, len)) + if (put_user(num_idents, &p->shmac_num_idents)) + return -EFAULT; + if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) return -EFAULT; - return 0; } @@ -5219,6 +5251,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len, struct sctp_authkeyid val; struct sctp_association *asoc; + if (!sctp_auth_enable) + return -EACCES; + if (len < sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) @@ -5233,6 +5268,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len, else val.scact_keynumber = sctp_sk(sk)->ep->active_key_id; + len = sizeof(struct sctp_authkeyid); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + return 0; } @@ -5243,13 +5284,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; - u32 num_chunks; + u32 num_chunks = 0; char __user *to; - if (len <= sizeof(struct sctp_authchunks)) + if (!sctp_auth_enable) + return -EACCES; + + if (len < sizeof(struct sctp_authchunks)) return -EINVAL; - if (copy_from_user(&val, p, sizeof(struct sctp_authchunks))) + if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; @@ -5258,20 +5302,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, return -EINVAL; ch = asoc->peer.peer_chunks; + if (!ch) + goto num; /* See if the user provided enough room for all the data */ num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < num_chunks) return -EINVAL; - len = num_chunks; - if (put_user(len, optlen)) + if (copy_to_user(to, ch->chunks, num_chunks)) return -EFAULT; +num: + len = sizeof(struct sctp_authchunks) + num_chunks; + if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, &p->gauth_number_of_chunks)) return -EFAULT; - if (copy_to_user(to, ch->chunks, len)) - return -EFAULT; - return 0; } @@ -5282,13 +5327,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; - u32 num_chunks; + u32 num_chunks = 0; char __user *to; - if (len <= sizeof(struct sctp_authchunks)) + if (!sctp_auth_enable) + return -EACCES; + + if (len < sizeof(struct sctp_authchunks)) return -EINVAL; - if (copy_from_user(&val, p, sizeof(struct sctp_authchunks))) + if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; @@ -5301,17 +5349,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, else ch = sctp_sk(sk)->ep->auth_chunk_list; + if (!ch) + goto num; + num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); - if (len < num_chunks) + if (len < sizeof(struct sctp_authchunks) + num_chunks) return -EINVAL; - len = num_chunks; + if (copy_to_user(to, ch->chunks, num_chunks)) + return -EFAULT; +num: + len = sizeof(struct sctp_authchunks) + num_chunks; if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, &p->gauth_number_of_chunks)) return -EFAULT; - if (copy_to_user(to, ch->chunks, len)) - return -EFAULT; return 0; } diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index 0f8c439b848..5231f7aaac0 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c @@ -60,24 +60,14 @@ 
static int proc_do_xprt(ctl_table *table, int write, struct file *file, void __user *buffer, size_t *lenp, loff_t *ppos) { char tmpbuf[256]; - int len; + size_t len; + if ((*ppos && !write) || !*lenp) { *lenp = 0; return 0; } - if (write) - return -EINVAL; - else { - len = svc_print_xprts(tmpbuf, sizeof(tmpbuf)); - if (!access_ok(VERIFY_WRITE, buffer, len)) - return -EFAULT; - - if (__copy_to_user(buffer, tmpbuf, len)) - return -EFAULT; - } - *lenp -= len; - *ppos += len; - return 0; + len = svc_print_xprts(tmpbuf, sizeof(tmpbuf)); + return simple_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len); } static int diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index b4b17f44cb2..74de31a0661 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -443,18 +443,18 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) dprintk("svcrdma: rqstp=%p\n", rqstp); - spin_lock_bh(&rdma_xprt->sc_read_complete_lock); + spin_lock_bh(&rdma_xprt->sc_rq_dto_lock); if (!list_empty(&rdma_xprt->sc_read_complete_q)) { ctxt = list_entry(rdma_xprt->sc_read_complete_q.next, struct svc_rdma_op_ctxt, dto_q); list_del_init(&ctxt->dto_q); } - spin_unlock_bh(&rdma_xprt->sc_read_complete_lock); - if (ctxt) + if (ctxt) { + spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock); return rdma_read_complete(rqstp, ctxt); + } - spin_lock_bh(&rdma_xprt->sc_rq_dto_lock); if (!list_empty(&rdma_xprt->sc_rq_dto_q)) { ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next, struct svc_rdma_op_ctxt, diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 19ddc382b77..900cb69728c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -359,11 +359,11 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) { struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr; BUG_ON(!read_hdr); + spin_lock_bh(&xprt->sc_rq_dto_lock); set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); - spin_lock_bh(&xprt->sc_read_complete_lock); list_add_tail(&read_hdr->dto_q, &xprt->sc_read_complete_q); - spin_unlock_bh(&xprt->sc_read_complete_lock); + spin_unlock_bh(&xprt->sc_rq_dto_lock); svc_xprt_enqueue(&xprt->sc_xprt); } svc_rdma_put_context(ctxt, 0); @@ -428,7 +428,6 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, init_waitqueue_head(&cma_xprt->sc_send_wait); spin_lock_init(&cma_xprt->sc_lock); - spin_lock_init(&cma_xprt->sc_read_complete_lock); spin_lock_init(&cma_xprt->sc_rq_dto_lock); cma_xprt->sc_ord = svcrdma_ord; diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index b1ff16aa4bd..3ddaff42d1b 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -96,8 +96,8 @@ struct bcbearer { struct media media; struct bcbearer_pair bpairs[MAX_BEARERS]; struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1]; - struct node_map remains; - struct node_map remains_new; + struct tipc_node_map remains; + struct tipc_node_map remains_new; }; /** @@ -110,7 +110,7 @@ struct bcbearer { struct bclink { struct link link; - struct node node; + struct tipc_node node; }; @@ -149,7 +149,7 @@ static void bcbuf_decr_acks(struct sk_buff *buf) * Called with 'node' locked, bc_lock unlocked */ -static void bclink_set_gap(struct node *n_ptr) +static void bclink_set_gap(struct tipc_node *n_ptr) { struct sk_buff *buf = n_ptr->bclink.deferred_head; @@ -202,7 +202,7 @@ static void bclink_retransmit_pkt(u32 after, u32 to) * Node is locked, bc_lock unlocked. 
*/ -void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked) +void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) { struct sk_buff *crs; struct sk_buff *next; @@ -250,7 +250,7 @@ void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked) * tipc_net_lock and node lock set */ -static void bclink_send_ack(struct node *n_ptr) +static void bclink_send_ack(struct tipc_node *n_ptr) { struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; @@ -264,7 +264,7 @@ static void bclink_send_ack(struct node *n_ptr) * tipc_net_lock and node lock set */ -static void bclink_send_nack(struct node *n_ptr) +static void bclink_send_nack(struct tipc_node *n_ptr) { struct sk_buff *buf; struct tipc_msg *msg; @@ -308,7 +308,7 @@ static void bclink_send_nack(struct node *n_ptr) * tipc_net_lock and node lock set */ -void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent) +void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent) { if (!n_ptr->bclink.supported || less_eq(last_sent, mod(n_ptr->bclink.last_in))) @@ -328,7 +328,7 @@ void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent) static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to) { - struct node *n_ptr = tipc_node_find(dest); + struct tipc_node *n_ptr = tipc_node_find(dest); u32 my_after, my_to; if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr))) @@ -418,7 +418,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf) static int rx_count = 0; #endif struct tipc_msg *msg = buf_msg(buf); - struct node* node = tipc_node_find(msg_prevnode(msg)); + struct tipc_node* node = tipc_node_find(msg_prevnode(msg)); u32 next_in; u32 seqno; struct sk_buff *deferred; @@ -538,7 +538,7 @@ u32 tipc_bclink_get_last_sent(void) return last_sent; } -u32 tipc_bclink_acks_missing(struct node *n_ptr) +u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) { return (n_ptr->bclink.supported && (tipc_bclink_get_last_sent() != n_ptr->bclink.acked)); diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index a2416fa6b90..5aa024b99c5 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -41,12 +41,12 @@ #define WSIZE 32 /** - * struct node_map - set of node identifiers + * struct tipc_node_map - set of node identifiers * @count: # of nodes in set * @map: bitmap of node identifiers that are in the set */ -struct node_map { +struct tipc_node_map { u32 count; u32 map[MAX_NODES / WSIZE]; }; @@ -68,7 +68,7 @@ struct port_list { }; -struct node; +struct tipc_node; extern char tipc_bclink_name[]; @@ -77,7 +77,7 @@ extern char tipc_bclink_name[]; * nmap_add - add a node to a node map */ -static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node) +static inline void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) { int n = tipc_node(node); int w = n / WSIZE; @@ -93,7 +93,7 @@ static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node) * nmap_remove - remove a node from a node map */ -static inline void tipc_nmap_remove(struct node_map *nm_ptr, u32 node) +static inline void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) { int n = tipc_node(node); int w = n / WSIZE; @@ -109,7 +109,7 @@ static inline void tipc_nmap_remove(struct node_map *nm_ptr, u32 node) * nmap_equal - test for equality of node maps */ -static inline int tipc_nmap_equal(struct node_map *nm_a, struct node_map *nm_b) +static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b) { return !memcmp(nm_a, nm_b, sizeof(*nm_a)); } @@ -121,8 +121,8 @@ static inline int tipc_nmap_equal(struct node_map *nm_a, struct node_map 
*nm_b) * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) */ -static inline void tipc_nmap_diff(struct node_map *nm_a, struct node_map *nm_b, - struct node_map *nm_diff) +static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, + struct tipc_node_map *nm_diff) { int stop = sizeof(nm_a->map) / sizeof(u32); int w; @@ -195,12 +195,12 @@ static inline void tipc_port_list_free(struct port_list *pl_ptr) int tipc_bclink_init(void); void tipc_bclink_stop(void); -void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked); +void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked); int tipc_bclink_send_msg(struct sk_buff *buf); void tipc_bclink_recv_pkt(struct sk_buff *buf); u32 tipc_bclink_get_last_sent(void); -u32 tipc_bclink_acks_missing(struct node *n_ptr); -void tipc_bclink_check_gap(struct node *n_ptr, u32 seqno); +u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr); +void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 seqno); int tipc_bclink_stats(char *stats_buf, const u32 buf_size); int tipc_bclink_reset_stats(void); int tipc_bclink_set_queue_limits(u32 limit); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 6a9aba3edd0..a7a36779b9b 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -599,7 +599,7 @@ int tipc_block_bearer(const char *name) spin_lock_bh(&b_ptr->publ.lock); b_ptr->publ.blocked = 1; list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { - struct node *n_ptr = l_ptr->owner; + struct tipc_node *n_ptr = l_ptr->owner; spin_lock_bh(&n_ptr->lock); tipc_link_reset(l_ptr); diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 6a36b6600e6..ca573489271 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -104,7 +104,7 @@ struct bearer { u32 continue_count; int active; char net_plane; - struct node_map nodes; + struct tipc_node_map nodes; }; struct bearer_name { diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c index 46ee6c58532..689fdefe9d0 100644 --- a/net/tipc/cluster.c +++ b/net/tipc/cluster.c @@ -48,8 +48,8 @@ static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf, u32 lower, u32 upper); static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest); -struct node **tipc_local_nodes = NULL; -struct node_map tipc_cltr_bcast_nodes = {0,{0,}}; +struct tipc_node **tipc_local_nodes = NULL; +struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}}; u32 tipc_highest_allowed_slave = 0; struct cluster *tipc_cltr_create(u32 addr) @@ -115,7 +115,7 @@ void tipc_cltr_delete(struct cluster *c_ptr) u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr) { - struct node *n_ptr; + struct tipc_node *n_ptr; u32 n_num = tipc_node(addr) + 1; if (!c_ptr) @@ -133,7 +133,7 @@ u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr) return 0; } -void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr) +void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr) { u32 n_num = tipc_node(n_ptr->addr); u32 max_n_num = tipc_max_nodes; @@ -196,7 +196,7 @@ u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref) * Uses deterministic and fair algorithm. 
*/ -struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector) +struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector) { u32 n_num; u32 mask = tipc_max_nodes; @@ -379,7 +379,7 @@ void tipc_cltr_recv_routing_table(struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); struct cluster *c_ptr; - struct node *n_ptr; + struct tipc_node *n_ptr; unchar *node_table; u32 table_size; u32 router; @@ -499,7 +499,7 @@ static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf, u32 lower, u32 upper) { struct sk_buff *buf_copy; - struct node *n_ptr; + struct tipc_node *n_ptr; u32 n_num; u32 tstop; @@ -534,7 +534,7 @@ void tipc_cltr_broadcast(struct sk_buff *buf) { struct sk_buff *buf_copy; struct cluster *c_ptr; - struct node *n_ptr; + struct tipc_node *n_ptr; u32 n_num; u32 tstart; u32 tstop; diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h index 62df074afae..333efb0b9c4 100644 --- a/net/tipc/cluster.h +++ b/net/tipc/cluster.h @@ -54,24 +54,24 @@ struct cluster { u32 addr; struct _zone *owner; - struct node **nodes; + struct tipc_node **nodes; u32 highest_node; u32 highest_slave; }; -extern struct node **tipc_local_nodes; +extern struct tipc_node **tipc_local_nodes; extern u32 tipc_highest_allowed_slave; -extern struct node_map tipc_cltr_bcast_nodes; +extern struct tipc_node_map tipc_cltr_bcast_nodes; void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router); void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest); -struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector); +struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector); u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref); void tipc_cltr_recv_routing_table(struct sk_buff *buf); struct cluster *tipc_cltr_create(u32 addr); void tipc_cltr_delete(struct cluster *c_ptr); -void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr); +void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr); void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest); void tipc_cltr_broadcast(struct sk_buff *buf); int tipc_cltr_init(void); diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 1657f0e795f..74b7d1e28ae 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -193,7 +193,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr) /* Always accept link here */ struct sk_buff *rbuf; struct tipc_media_addr *addr; - struct node *n_ptr = tipc_node_find(orig); + struct tipc_node *n_ptr = tipc_node_find(orig); int link_fully_up; dbg(" in own cluster\n"); diff --git a/net/tipc/link.c b/net/tipc/link.c index d60113ba4b1..dd4c18b9a35 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1155,7 +1155,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf) int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) { struct link *l_ptr; - struct node *n_ptr; + struct tipc_node *n_ptr; int res = -ELINKCONG; read_lock_bh(&tipc_net_lock); @@ -1226,7 +1226,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf, int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode) { struct link *l_ptr; - struct node *n_ptr; + struct tipc_node *n_ptr; int res; u32 selector = msg_origport(buf_msg(buf)) & 1; u32 dummy; @@ -1270,7 +1270,7 @@ int tipc_link_send_sections_fast(struct port *sender, struct tipc_msg *hdr = &sender->publ.phdr; struct link *l_ptr; struct sk_buff *buf; - struct node *node; + struct tipc_node *node; int res; u32 selector = msg_origport(hdr) & 1; @@ 
-1364,7 +1364,7 @@ static int link_send_sections_long(struct port *sender, u32 destaddr) { struct link *l_ptr; - struct node *node; + struct tipc_node *node; struct tipc_msg *hdr = &sender->publ.phdr; u32 dsz = msg_data_sz(hdr); u32 max_pkt,fragm_sz,rest; @@ -1636,7 +1636,7 @@ void tipc_link_push_queue(struct link *l_ptr) static void link_reset_all(unsigned long addr) { - struct node *n_ptr; + struct tipc_node *n_ptr; char addr_string[16]; u32 i; @@ -1682,7 +1682,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf) /* Handle failure on broadcast link */ - struct node *n_ptr; + struct tipc_node *n_ptr; char addr_string[16]; tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg)); @@ -1843,7 +1843,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr) read_lock_bh(&tipc_net_lock); while (head) { struct bearer *b_ptr = (struct bearer *)tb_ptr; - struct node *n_ptr; + struct tipc_node *n_ptr; struct link *l_ptr; struct sk_buff *crs; struct sk_buff *buf = head; @@ -2935,7 +2935,7 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window) * Returns pointer to link (or 0 if invalid link name). */ -static struct link *link_find_link(const char *name, struct node **node) +static struct link *link_find_link(const char *name, struct tipc_node **node) { struct link_name link_name_parts; struct bearer *b_ptr; @@ -2965,7 +2965,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space struct tipc_link_config *args; u32 new_value; struct link *l_ptr; - struct node *node; + struct tipc_node *node; int res; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) @@ -3043,7 +3043,7 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_ { char *link_name; struct link *l_ptr; - struct node *node; + struct tipc_node *node; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); @@ -3091,7 +3091,7 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) { struct print_buf pb; struct link *l_ptr; - struct node *node; + struct tipc_node *node; char *status; u32 profile_total = 0; @@ -3207,7 +3207,7 @@ int link_control(const char *name, u32 op, u32 val) int res = -EINVAL; struct link *l_ptr; u32 bearer_id; - struct node * node; + struct tipc_node * node; u32 a; a = link_name2addr(name, &bearer_id); @@ -3249,7 +3249,7 @@ int link_control(const char *name, u32 op, u32 val) u32 tipc_link_get_max_pkt(u32 dest, u32 selector) { - struct node *n_ptr; + struct tipc_node *n_ptr; struct link *l_ptr; u32 res = MAX_PKT_DEFAULT; diff --git a/net/tipc/link.h b/net/tipc/link.h index 52f3e7c1871..6a51e38ad25 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -116,7 +116,7 @@ struct link { char name[TIPC_MAX_LINK_NAME]; struct tipc_media_addr media_addr; struct timer_list timer; - struct node *owner; + struct tipc_node *owner; struct list_head link_list; /* Management and link supervision data */ diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index b9e7cd336d7..139882d4ed0 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -76,7 +76,7 @@ struct publication { u32 node; u32 ref; u32 key; - struct node_subscr subscr; + struct tipc_node_subscr subscr; struct list_head local_list; struct list_head pport_list; struct publication *node_list_next; diff --git a/net/tipc/net.c b/net/tipc/net.c index ec7b04fbdc4..7906608bf51 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -118,7 +118,7 @@ 
DEFINE_RWLOCK(tipc_net_lock); struct network tipc_net = { NULL }; -struct node *tipc_net_select_remote_node(u32 addr, u32 ref) +struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) { return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref); } diff --git a/net/tipc/net.h b/net/tipc/net.h index d154ac2bda9..de2b9ad8f64 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h @@ -55,7 +55,7 @@ extern rwlock_t tipc_net_lock; void tipc_net_remove_as_router(u32 router); void tipc_net_send_external_routes(u32 dest); void tipc_net_route_msg(struct sk_buff *buf); -struct node *tipc_net_select_remote_node(u32 addr, u32 ref); +struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref); u32 tipc_net_select_router(u32 addr, u32 ref); int tipc_net_start(u32 addr); diff --git a/net/tipc/node.c b/net/tipc/node.c index ee952ad6021..20d98c56e15 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -46,11 +46,11 @@ #include "bearer.h" #include "name_distr.h" -void node_print(struct print_buf *buf, struct node *n_ptr, char *str); -static void node_lost_contact(struct node *n_ptr); -static void node_established_contact(struct node *n_ptr); +void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str); +static void node_lost_contact(struct tipc_node *n_ptr); +static void node_established_contact(struct tipc_node *n_ptr); -struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */ +struct tipc_node *tipc_nodes = NULL; /* sorted list of nodes within cluster */ static DEFINE_SPINLOCK(node_create_lock); @@ -66,11 +66,11 @@ u32 tipc_own_tag = 0; * but this is a non-trivial change.) */ -struct node *tipc_node_create(u32 addr) +struct tipc_node *tipc_node_create(u32 addr) { struct cluster *c_ptr; - struct node *n_ptr; - struct node **curr_node; + struct tipc_node *n_ptr; + struct tipc_node **curr_node; spin_lock_bh(&node_create_lock); @@ -120,7 +120,7 @@ struct node *tipc_node_create(u32 addr) return n_ptr; } -void tipc_node_delete(struct node *n_ptr) +void tipc_node_delete(struct tipc_node *n_ptr) { if (!n_ptr) return; @@ -146,7 +146,7 @@ void tipc_node_delete(struct node *n_ptr) * Link becomes active (alone or shared) or standby, depending on its priority. 
*/ -void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr) +void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr) { struct link **active = &n_ptr->active_links[0]; @@ -180,7 +180,7 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr) * node_select_active_links - select active link */ -static void node_select_active_links(struct node *n_ptr) +static void node_select_active_links(struct tipc_node *n_ptr) { struct link **active = &n_ptr->active_links[0]; u32 i; @@ -208,7 +208,7 @@ static void node_select_active_links(struct node *n_ptr) * tipc_node_link_down - handle loss of link */ -void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr) +void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr) { struct link **active; @@ -235,30 +235,30 @@ void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr) node_lost_contact(n_ptr); } -int tipc_node_has_active_links(struct node *n_ptr) +int tipc_node_has_active_links(struct tipc_node *n_ptr) { return (n_ptr && ((n_ptr->active_links[0]) || (n_ptr->active_links[1]))); } -int tipc_node_has_redundant_links(struct node *n_ptr) +int tipc_node_has_redundant_links(struct tipc_node *n_ptr) { return (n_ptr->working_links > 1); } -static int tipc_node_has_active_routes(struct node *n_ptr) +static int tipc_node_has_active_routes(struct tipc_node *n_ptr) { return (n_ptr && (n_ptr->last_router >= 0)); } -int tipc_node_is_up(struct node *n_ptr) +int tipc_node_is_up(struct tipc_node *n_ptr) { return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr)); } -struct node *tipc_node_attach_link(struct link *l_ptr) +struct tipc_node *tipc_node_attach_link(struct link *l_ptr) { - struct node *n_ptr = tipc_node_find(l_ptr->addr); + struct tipc_node *n_ptr = tipc_node_find(l_ptr->addr); if (!n_ptr) n_ptr = tipc_node_create(l_ptr->addr); @@ -285,7 +285,7 @@ struct node *tipc_node_attach_link(struct link *l_ptr) return NULL; } -void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr) +void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr) { n_ptr->links[l_ptr->b_ptr->identity] = NULL; tipc_net.zones[tipc_zone(l_ptr->addr)]->links--; @@ -338,7 +338,7 @@ void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr) * */ -static void node_established_contact(struct node *n_ptr) +static void node_established_contact(struct tipc_node *n_ptr) { struct cluster *c_ptr; @@ -384,10 +384,10 @@ static void node_established_contact(struct node *n_ptr) tipc_highest_allowed_slave); } -static void node_lost_contact(struct node *n_ptr) +static void node_lost_contact(struct tipc_node *n_ptr) { struct cluster *c_ptr; - struct node_subscr *ns, *tns; + struct tipc_node_subscr *ns, *tns; char addr_string[16]; u32 i; @@ -466,9 +466,9 @@ static void node_lost_contact(struct node *n_ptr) * Called by when cluster local lookup has failed. */ -struct node *tipc_node_select_next_hop(u32 addr, u32 selector) +struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector) { - struct node *n_ptr; + struct tipc_node *n_ptr; u32 router_addr; if (!tipc_addr_domain_valid(addr)) @@ -513,7 +513,7 @@ struct node *tipc_node_select_next_hop(u32 addr, u32 selector) * Uses a deterministic and fair algorithm for selecting router node. 
*/ -u32 tipc_node_select_router(struct node *n_ptr, u32 ref) +u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref) { u32 ulim; u32 mask; @@ -551,7 +551,7 @@ u32 tipc_node_select_router(struct node *n_ptr, u32 ref) return tipc_addr(own_zone(), own_cluster(), r); } -void tipc_node_add_router(struct node *n_ptr, u32 router) +void tipc_node_add_router(struct tipc_node *n_ptr, u32 router) { u32 r_num = tipc_node(router); @@ -562,7 +562,7 @@ void tipc_node_add_router(struct node *n_ptr, u32 router) !n_ptr->routers[n_ptr->last_router]); } -void tipc_node_remove_router(struct node *n_ptr, u32 router) +void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router) { u32 r_num = tipc_node(router); @@ -580,7 +580,7 @@ void tipc_node_remove_router(struct node *n_ptr, u32 router) } #if 0 -void node_print(struct print_buf *buf, struct node *n_ptr, char *str) +void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str) { u32 i; @@ -597,7 +597,7 @@ void node_print(struct print_buf *buf, struct node *n_ptr, char *str) u32 tipc_available_nodes(const u32 domain) { - struct node *n_ptr; + struct tipc_node *n_ptr; u32 cnt = 0; read_lock_bh(&tipc_net_lock); @@ -615,7 +615,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) { u32 domain; struct sk_buff *buf; - struct node *n_ptr; + struct tipc_node *n_ptr; struct tipc_node_info node_info; u32 payload_size; @@ -667,7 +667,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) { u32 domain; struct sk_buff *buf; - struct node *n_ptr; + struct tipc_node *n_ptr; struct tipc_link_info link_info; u32 payload_size; diff --git a/net/tipc/node.h b/net/tipc/node.h index cd1882654bb..6f990da5d14 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -43,7 +43,7 @@ #include "bearer.h" /** - * struct node - TIPC node structure + * struct tipc_node - TIPC node structure * @addr: network address of node * @lock: spinlock governing access to structure * @owner: pointer to cluster that node belongs to @@ -68,11 +68,11 @@ * @defragm: list of partially reassembled b'cast message fragments from node */ -struct node { +struct tipc_node { u32 addr; spinlock_t lock; struct cluster *owner; - struct node *next; + struct tipc_node *next; struct list_head nsub; struct link *active_links[2]; struct link *links[MAX_BEARERS]; @@ -94,26 +94,26 @@ struct node { } bclink; }; -extern struct node *tipc_nodes; +extern struct tipc_node *tipc_nodes; extern u32 tipc_own_tag; -struct node *tipc_node_create(u32 addr); -void tipc_node_delete(struct node *n_ptr); -struct node *tipc_node_attach_link(struct link *l_ptr); -void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr); -void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr); -void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr); -int tipc_node_has_active_links(struct node *n_ptr); -int tipc_node_has_redundant_links(struct node *n_ptr); -u32 tipc_node_select_router(struct node *n_ptr, u32 ref); -struct node *tipc_node_select_next_hop(u32 addr, u32 selector); -int tipc_node_is_up(struct node *n_ptr); -void tipc_node_add_router(struct node *n_ptr, u32 router); -void tipc_node_remove_router(struct node *n_ptr, u32 router); +struct tipc_node *tipc_node_create(u32 addr); +void tipc_node_delete(struct tipc_node *n_ptr); +struct tipc_node *tipc_node_attach_link(struct link *l_ptr); +void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr); +void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr); +void 
tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr); +int tipc_node_has_active_links(struct tipc_node *n_ptr); +int tipc_node_has_redundant_links(struct tipc_node *n_ptr); +u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref); +struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector); +int tipc_node_is_up(struct tipc_node *n_ptr); +void tipc_node_add_router(struct tipc_node *n_ptr, u32 router); +void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router); struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space); struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); -static inline struct node *tipc_node_find(u32 addr) +static inline struct tipc_node *tipc_node_find(u32 addr) { if (likely(in_own_cluster(addr))) return tipc_local_nodes[tipc_node(addr)]; @@ -126,19 +126,19 @@ static inline struct node *tipc_node_find(u32 addr) return NULL; } -static inline struct node *tipc_node_select(u32 addr, u32 selector) +static inline struct tipc_node *tipc_node_select(u32 addr, u32 selector) { if (likely(in_own_cluster(addr))) return tipc_local_nodes[tipc_node(addr)]; return tipc_node_select_next_hop(addr, selector); } -static inline void tipc_node_lock(struct node *n_ptr) +static inline void tipc_node_lock(struct tipc_node *n_ptr) { spin_lock_bh(&n_ptr->lock); } -static inline void tipc_node_unlock(struct node *n_ptr) +static inline void tipc_node_unlock(struct tipc_node *n_ptr) { spin_unlock_bh(&n_ptr->lock); } diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c index 8ecbd0fb610..19194d476a9 100644 --- a/net/tipc/node_subscr.c +++ b/net/tipc/node_subscr.c @@ -44,7 +44,7 @@ * tipc_nodesub_subscribe - create "node down" subscription for specified node */ -void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, +void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, void *usr_handle, net_ev_handler handle_down) { if (addr == tipc_own_addr) { @@ -69,7 +69,7 @@ void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any) */ -void tipc_nodesub_unsubscribe(struct node_subscr *node_sub) +void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) { if (!node_sub->node) return; diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h index 5f3f5859b84..006ed739f51 100644 --- a/net/tipc/node_subscr.h +++ b/net/tipc/node_subscr.h @@ -42,22 +42,22 @@ typedef void (*net_ev_handler) (void *usr_handle); /** - * struct node_subscr - "node down" subscription entry + * struct tipc_node_subscr - "node down" subscription entry * @node: ptr to node structure of interest (or NULL, if none) * @handle_node_down: routine to invoke when node fails * @usr_handle: argument to pass to routine when node fails * @nodesub_list: adjacent entries in list of subscriptions for the node */ -struct node_subscr { - struct node *node; +struct tipc_node_subscr { + struct tipc_node *node; net_ev_handler handle_node_down; void *usr_handle; struct list_head nodesub_list; }; -void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, +void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, void *usr_handle, net_ev_handler handle_down); -void tipc_nodesub_unsubscribe(struct node_subscr *node_sub); +void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub); #endif diff --git a/net/tipc/port.h b/net/tipc/port.h index e5f8c16429b..ff31ee4a1dc 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h @@ -105,7 
+105,7 @@ struct port { u32 probing_interval; u32 last_in_seqno; struct timer_list timer; - struct node_subscr subscription; + struct tipc_node_subscr subscription; }; extern spinlock_t tipc_port_list_lock; diff --git a/net/tipc/zone.c b/net/tipc/zone.c index 3506f856344..2c01ba2d86b 100644 --- a/net/tipc/zone.c +++ b/net/tipc/zone.c @@ -111,10 +111,10 @@ void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest) } } -struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref) +struct tipc_node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref) { struct cluster *c_ptr; - struct node *n_ptr; + struct tipc_node *n_ptr; u32 c_num; if (!z_ptr) diff --git a/net/tipc/zone.h b/net/tipc/zone.h index 6e7a08df8af..7bdc3406ba9 100644 --- a/net/tipc/zone.h +++ b/net/tipc/zone.h @@ -54,7 +54,7 @@ struct _zone { u32 links; }; -struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref); +struct tipc_node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref); u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref); void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router); void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest); diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index ab015c62d56..833b024f8f6 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -39,4 +39,5 @@ config WIRELESS_EXT_SYSFS files in /sys/class/net/*/wireless/. The same information is available via the ioctls as well. - Say Y if you have programs using it (we don't know of any). + Say Y if you have programs using it, like old versions of + hal. diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 841b32a2e68..46914b79d85 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1731,8 +1731,7 @@ restart: * We can't enlist stable bundles either. 
*/ write_unlock_bh(&policy->lock); - if (dst) - dst_free(dst); + dst_free(dst); if (pol_dead) XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLDEAD); @@ -1748,8 +1747,7 @@ restart: err = xfrm_dst_update_origin(dst, fl); if (unlikely(err)) { write_unlock_bh(&policy->lock); - if (dst) - dst_free(dst); + dst_free(dst); XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR); goto error; } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 4c6914ef7d9..7bd62f61593 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -780,11 +780,13 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, { unsigned int h; struct hlist_node *entry; - struct xfrm_state *x, *x0; + struct xfrm_state *x, *x0, *to_put; int acquire_in_progress = 0; int error = 0; struct xfrm_state *best = NULL; + to_put = NULL; + spin_lock_bh(&xfrm_state_lock); h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family); hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) { @@ -833,7 +835,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, if (tmpl->id.spi && (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi, tmpl->id.proto, family)) != NULL) { - xfrm_state_put(x0); + to_put = x0; error = -EEXIST; goto out; } @@ -849,7 +851,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); if (error) { x->km.state = XFRM_STATE_DEAD; - xfrm_state_put(x); + to_put = x; x = NULL; goto out; } @@ -870,7 +872,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, xfrm_hash_grow_check(x->bydst.next != NULL); } else { x->km.state = XFRM_STATE_DEAD; - xfrm_state_put(x); + to_put = x; x = NULL; error = -ESRCH; } @@ -881,6 +883,8 @@ out: else *err = acquire_in_progress ? -EAGAIN : error; spin_unlock_bh(&xfrm_state_lock); + if (to_put) + xfrm_state_put(to_put); return x; } @@ -1067,18 +1071,20 @@ static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq); int xfrm_state_add(struct xfrm_state *x) { - struct xfrm_state *x1; + struct xfrm_state *x1, *to_put; int family; int err; int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); family = x->props.family; + to_put = NULL; + spin_lock_bh(&xfrm_state_lock); x1 = __xfrm_state_locate(x, use_spi, family); if (x1) { - xfrm_state_put(x1); + to_put = x1; x1 = NULL; err = -EEXIST; goto out; @@ -1088,7 +1094,7 @@ int xfrm_state_add(struct xfrm_state *x) x1 = __xfrm_find_acq_byseq(x->km.seq); if (x1 && ((x1->id.proto != x->id.proto) || xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) { - xfrm_state_put(x1); + to_put = x1; x1 = NULL; } } @@ -1110,6 +1116,9 @@ out: xfrm_state_put(x1); } + if (to_put) + xfrm_state_put(to_put); + return err; } EXPORT_SYMBOL(xfrm_state_add); @@ -1269,10 +1278,12 @@ EXPORT_SYMBOL(xfrm_state_migrate); int xfrm_state_update(struct xfrm_state *x) { - struct xfrm_state *x1; + struct xfrm_state *x1, *to_put; int err; int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); + to_put = NULL; + spin_lock_bh(&xfrm_state_lock); x1 = __xfrm_state_locate(x, use_spi, x->props.family); @@ -1281,7 +1292,7 @@ int xfrm_state_update(struct xfrm_state *x) goto out; if (xfrm_state_kern(x1)) { - xfrm_state_put(x1); + to_put = x1; err = -EEXIST; goto out; } @@ -1295,6 +1306,9 @@ int xfrm_state_update(struct xfrm_state *x) out: spin_unlock_bh(&xfrm_state_lock); + if (to_put) + xfrm_state_put(to_put); + if (err) return err; |
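
The xfrm_state.c hunks that close this diff all make the same change: instead of calling xfrm_state_put() while xfrm_state_lock is held, the state to be released is parked in a local to_put pointer and put only after the spinlock is dropped. A minimal user-space sketch of that deferred-release pattern follows; the struct obj, obj_put() and slot_replace() names (and the pthread mutex standing in for the spinlock) are illustrative assumptions, not kernel code, and the reference count is left non-atomic for brevity.

#include <pthread.h>
#include <stdlib.h>

/* Illustrative refcounted object; not a kernel type. */
struct obj {
	int refcnt;		/* simplified, non-atomic for the sketch */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dropping the last reference may do heavy teardown, so it must not
 * happen while table_lock is held. */
static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

/* Swap in a new object; the displaced one is released only after the
 * lock is dropped, mirroring the "to_put" pattern in the hunks above. */
static void slot_replace(struct obj **slot, struct obj *new_obj)
{
	struct obj *to_put;

	pthread_mutex_lock(&table_lock);
	to_put = *slot;			/* remember it instead of putting it here */
	*slot = new_obj;
	pthread_mutex_unlock(&table_lock);

	obj_put(to_put);		/* safe: no lock held any more */
}

The motivation is the usual one for this idiom: releasing the last reference can trigger arbitrary destructor work (and possibly take other locks), which is unsafe and latency-hostile inside a spinlocked section, so the put is deferred to lock-free context.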
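
Earlier in the diff, sctp_auth_create_key() gains a guard so that sizeof(header) + key_len cannot overflow before the allocation is attempted. The stand-alone sketch below shows the same idea; keyblob and keyblob_create() are hypothetical names, and the check is written in the equivalent "key_len > INT_MAX - sizeof(header)" form rather than the exact expression used in the kernel hunk.

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical header plus flexible payload, standing in for the
 * shared-key structure touched by the diff. */
struct keyblob {
	uint32_t len;
	unsigned char data[];
};

/* Refuse lengths for which header + payload would exceed INT_MAX,
 * so the combined size can never wrap if later treated as an int. */
static struct keyblob *keyblob_create(const void *key, uint32_t key_len)
{
	struct keyblob *b;

	if (key_len > INT_MAX - sizeof(struct keyblob))
		return NULL;

	b = malloc(sizeof(*b) + key_len);
	if (!b)
		return NULL;

	b->len = key_len;
	memcpy(b->data, key, key_len);
	return b;
}

The point is simply to validate the caller-supplied length before it is combined with the header size, the same defensive ordering the SCTP hunk adds ahead of its kmalloc().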