Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/datagram.c    | 181
-rw-r--r-- | net/core/dev.c         | 230
-rw-r--r-- | net/core/iovec.c       |   4
-rw-r--r-- | net/core/neighbour.c   |  57
-rw-r--r-- | net/core/pktgen.c      |   6
-rw-r--r-- | net/core/skb_dma_map.c |  13
-rw-r--r-- | net/core/skbuff.c      | 241
-rw-r--r-- | net/core/sock.c        | 135
-rw-r--r-- | net/core/user_dma.c    |  46
9 files changed, 491 insertions, 422 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c index e2a36f05cdf..58abee1f1df 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -282,6 +282,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, { int start = skb_headlen(skb); int i, copy = start - offset; + struct sk_buff *frag_iter; /* Copy header. */ if (copy > 0) { @@ -322,28 +323,24 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, start = end; } - if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list = skb_shinfo(skb)->frag_list; - - for (; list; list = list->next) { - int end; - - WARN_ON(start > offset + len); - - end = start + list->len; - if ((copy = end - offset) > 0) { - if (copy > len) - copy = len; - if (skb_copy_datagram_iovec(list, - offset - start, - to, copy)) - goto fault; - if ((len -= copy) == 0) - return 0; - offset += copy; - } - start = end; + skb_walk_frags(skb, frag_iter) { + int end; + + WARN_ON(start > offset + len); + + end = start + frag_iter->len; + if ((copy = end - offset) > 0) { + if (copy > len) + copy = len; + if (skb_copy_datagram_iovec(frag_iter, + offset - start, + to, copy)) + goto fault; + if ((len -= copy) == 0) + return 0; + offset += copy; } + start = end; } if (!len) return 0; @@ -369,6 +366,7 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset, { int start = skb_headlen(skb); int i, copy = start - offset; + struct sk_buff *frag_iter; /* Copy header. */ if (copy > 0) { @@ -411,30 +409,26 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset, start = end; } - if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list = skb_shinfo(skb)->frag_list; - - for (; list; list = list->next) { - int end; - - WARN_ON(start > offset + len); - - end = start + list->len; - if ((copy = end - offset) > 0) { - if (copy > len) - copy = len; - if (skb_copy_datagram_const_iovec(list, - offset - start, - to, to_offset, - copy)) - goto fault; - if ((len -= copy) == 0) - return 0; - offset += copy; - to_offset += copy; - } - start = end; + skb_walk_frags(skb, frag_iter) { + int end; + + WARN_ON(start > offset + len); + + end = start + frag_iter->len; + if ((copy = end - offset) > 0) { + if (copy > len) + copy = len; + if (skb_copy_datagram_const_iovec(frag_iter, + offset - start, + to, to_offset, + copy)) + goto fault; + if ((len -= copy) == 0) + return 0; + offset += copy; + to_offset += copy; } + start = end; } if (!len) return 0; @@ -461,12 +455,14 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, { int start = skb_headlen(skb); int i, copy = start - offset; + struct sk_buff *frag_iter; /* Copy header. 
*/ if (copy > 0) { if (copy > len) copy = len; - if (memcpy_fromiovecend(skb->data + offset, from, 0, copy)) + if (memcpy_fromiovecend(skb->data + offset, from, from_offset, + copy)) goto fault; if ((len -= copy) == 0) return 0; @@ -505,31 +501,27 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, start = end; } - if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list = skb_shinfo(skb)->frag_list; - - for (; list; list = list->next) { - int end; - - WARN_ON(start > offset + len); - - end = start + list->len; - if ((copy = end - offset) > 0) { - if (copy > len) - copy = len; - if (skb_copy_datagram_from_iovec(list, - offset - start, - from, - from_offset, - copy)) - goto fault; - if ((len -= copy) == 0) - return 0; - offset += copy; - from_offset += copy; - } - start = end; + skb_walk_frags(skb, frag_iter) { + int end; + + WARN_ON(start > offset + len); + + end = start + frag_iter->len; + if ((copy = end - offset) > 0) { + if (copy > len) + copy = len; + if (skb_copy_datagram_from_iovec(frag_iter, + offset - start, + from, + from_offset, + copy)) + goto fault; + if ((len -= copy) == 0) + return 0; + offset += copy; + from_offset += copy; } + start = end; } if (!len) return 0; @@ -544,8 +536,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, __wsum *csump) { int start = skb_headlen(skb); - int pos = 0; int i, copy = start - offset; + struct sk_buff *frag_iter; + int pos = 0; /* Copy header. */ if (copy > 0) { @@ -596,33 +589,29 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, start = end; } - if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list = skb_shinfo(skb)->frag_list; - - for (; list; list=list->next) { - int end; - - WARN_ON(start > offset + len); - - end = start + list->len; - if ((copy = end - offset) > 0) { - __wsum csum2 = 0; - if (copy > len) - copy = len; - if (skb_copy_and_csum_datagram(list, - offset - start, - to, copy, - &csum2)) - goto fault; - *csump = csum_block_add(*csump, csum2, pos); - if ((len -= copy) == 0) - return 0; - offset += copy; - to += copy; - pos += copy; - } - start = end; + skb_walk_frags(skb, frag_iter) { + int end; + + WARN_ON(start > offset + len); + + end = start + frag_iter->len; + if ((copy = end - offset) > 0) { + __wsum csum2 = 0; + if (copy > len) + copy = len; + if (skb_copy_and_csum_datagram(frag_iter, + offset - start, + to, copy, + &csum2)) + goto fault; + *csump = csum_block_add(*csump, csum2, pos); + if ((len -= copy) == 0) + return 0; + offset += copy; + to += copy; + pos += copy; } + start = end; } if (!len) return 0; diff --git a/net/core/dev.c b/net/core/dev.c index ed4550fd9ec..11560e3258b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -269,7 +269,8 @@ static const unsigned short netdev_lock_type[] = ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, - ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE}; + ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY, + ARPHRD_VOID, ARPHRD_NONE}; static const char *netdev_lock_name[] = {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", @@ -286,7 +287,8 @@ static const char *netdev_lock_name[] = "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", - "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"}; + "_xmit_PHONET_PIPE", "_xmit_IEEE802154", 
"_xmit_IEEE802154_PHY", + "_xmit_VOID", "_xmit_NONE"}; static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; @@ -1048,7 +1050,7 @@ void dev_load(struct net *net, const char *name) int dev_open(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; - int ret = 0; + int ret; ASSERT_RTNL(); @@ -1065,6 +1067,11 @@ int dev_open(struct net_device *dev) if (!netif_device_present(dev)) return -ENODEV; + ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); + ret = notifier_to_errno(ret); + if (ret) + return ret; + /* * Call device private open method */ @@ -1693,10 +1700,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, * If device doesnt need skb->dst, release it right now while * its hot in this cpu cache */ - if ((dev->priv_flags & IFF_XMIT_DST_RELEASE) && skb->dst) { - dst_release(skb->dst); - skb->dst = NULL; - } + if (dev->priv_flags & IFF_XMIT_DST_RELEASE) + skb_dst_drop(skb); + rc = ops->ndo_start_xmit(skb, dev); if (rc == 0) txq_trans_update(txq); @@ -1816,7 +1822,7 @@ int dev_queue_xmit(struct sk_buff *skb) if (netif_needs_gso(dev, skb)) goto gso; - if (skb_shinfo(skb)->frag_list && + if (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST) && __skb_linearize(skb)) goto out_kfree_skb; @@ -2403,7 +2409,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) if (!(skb->dev->features & NETIF_F_GRO)) goto normal; - if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list) + if (skb_is_gso(skb) || skb_has_frags(skb)) goto normal; rcu_read_lock(); @@ -3473,8 +3479,9 @@ void dev_set_rx_mode(struct net_device *dev) /* hw addresses list handling functions */ -static int __hw_addr_add(struct list_head *list, unsigned char *addr, - int addr_len, unsigned char addr_type) +static int __hw_addr_add(struct list_head *list, int *delta, + unsigned char *addr, int addr_len, + unsigned char addr_type) { struct netdev_hw_addr *ha; int alloc_size; @@ -3482,6 +3489,15 @@ static int __hw_addr_add(struct list_head *list, unsigned char *addr, if (addr_len > MAX_ADDR_LEN) return -EINVAL; + list_for_each_entry(ha, list, list) { + if (!memcmp(ha->addr, addr, addr_len) && + ha->type == addr_type) { + ha->refcount++; + return 0; + } + } + + alloc_size = sizeof(*ha); if (alloc_size < L1_CACHE_BYTES) alloc_size = L1_CACHE_BYTES; @@ -3490,7 +3506,11 @@ static int __hw_addr_add(struct list_head *list, unsigned char *addr, return -ENOMEM; memcpy(ha->addr, addr, addr_len); ha->type = addr_type; + ha->refcount = 1; + ha->synced = false; list_add_tail_rcu(&ha->list, list); + if (delta) + (*delta)++; return 0; } @@ -3502,29 +3522,30 @@ static void ha_rcu_free(struct rcu_head *head) kfree(ha); } -static int __hw_addr_del_ii(struct list_head *list, unsigned char *addr, - int addr_len, unsigned char addr_type, - int ignore_index) +static int __hw_addr_del(struct list_head *list, int *delta, + unsigned char *addr, int addr_len, + unsigned char addr_type) { struct netdev_hw_addr *ha; - int i = 0; list_for_each_entry(ha, list, list) { - if (i++ != ignore_index && - !memcmp(ha->addr, addr, addr_len) && + if (!memcmp(ha->addr, addr, addr_len) && (ha->type == addr_type || !addr_type)) { + if (--ha->refcount) + return 0; list_del_rcu(&ha->list); call_rcu(&ha->rcu_head, ha_rcu_free); + if (delta) + (*delta)--; return 0; } } return -ENOENT; } -static int __hw_addr_add_multiple_ii(struct list_head *to_list, - struct list_head *from_list, - int addr_len, unsigned char 
addr_type, - int ignore_index) +static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta, + struct list_head *from_list, int addr_len, + unsigned char addr_type) { int err; struct netdev_hw_addr *ha, *ha2; @@ -3532,7 +3553,8 @@ static int __hw_addr_add_multiple_ii(struct list_head *to_list, list_for_each_entry(ha, from_list, list) { type = addr_type ? addr_type : ha->type; - err = __hw_addr_add(to_list, ha->addr, addr_len, type); + err = __hw_addr_add(to_list, to_delta, ha->addr, + addr_len, type); if (err) goto unroll; } @@ -3543,27 +3565,69 @@ unroll: if (ha2 == ha) break; type = addr_type ? addr_type : ha2->type; - __hw_addr_del_ii(to_list, ha2->addr, addr_len, type, - ignore_index); + __hw_addr_del(to_list, to_delta, ha2->addr, + addr_len, type); } return err; } -static void __hw_addr_del_multiple_ii(struct list_head *to_list, - struct list_head *from_list, - int addr_len, unsigned char addr_type, - int ignore_index) +static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta, + struct list_head *from_list, int addr_len, + unsigned char addr_type) { struct netdev_hw_addr *ha; unsigned char type; list_for_each_entry(ha, from_list, list) { type = addr_type ? addr_type : ha->type; - __hw_addr_del_ii(to_list, ha->addr, addr_len, addr_type, - ignore_index); + __hw_addr_del(to_list, to_delta, ha->addr, + addr_len, addr_type); + } +} + +static int __hw_addr_sync(struct list_head *to_list, int *to_delta, + struct list_head *from_list, int *from_delta, + int addr_len) +{ + int err = 0; + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, from_list, list) { + if (!ha->synced) { + err = __hw_addr_add(to_list, to_delta, ha->addr, + addr_len, ha->type); + if (err) + break; + ha->synced = true; + ha->refcount++; + } else if (ha->refcount == 1) { + __hw_addr_del(to_list, to_delta, ha->addr, + addr_len, ha->type); + __hw_addr_del(from_list, from_delta, ha->addr, + addr_len, ha->type); + } + } + return err; +} + +static void __hw_addr_unsync(struct list_head *to_list, int *to_delta, + struct list_head *from_list, int *from_delta, + int addr_len) +{ + struct netdev_hw_addr *ha, *tmp; + + list_for_each_entry_safe(ha, tmp, from_list, list) { + if (ha->synced) { + __hw_addr_del(to_list, to_delta, ha->addr, + addr_len, ha->type); + ha->synced = false; + __hw_addr_del(from_list, from_delta, ha->addr, + addr_len, ha->type); + } } } + static void __hw_addr_flush(struct list_head *list) { struct netdev_hw_addr *ha, *tmp; @@ -3593,8 +3657,8 @@ static int dev_addr_init(struct net_device *dev) /* rtnl_mutex must be held here */ INIT_LIST_HEAD(&dev->dev_addr_list); - memset(addr, 0, sizeof(*addr)); - err = __hw_addr_add(&dev->dev_addr_list, addr, sizeof(*addr), + memset(addr, 0, sizeof(addr)); + err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr), NETDEV_HW_ADDR_T_LAN); if (!err) { /* @@ -3626,7 +3690,7 @@ int dev_addr_add(struct net_device *dev, unsigned char *addr, ASSERT_RTNL(); - err = __hw_addr_add(&dev->dev_addr_list, addr, dev->addr_len, + err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len, addr_type); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); @@ -3649,11 +3713,20 @@ int dev_addr_del(struct net_device *dev, unsigned char *addr, unsigned char addr_type) { int err; + struct netdev_hw_addr *ha; ASSERT_RTNL(); - err = __hw_addr_del_ii(&dev->dev_addr_list, addr, dev->addr_len, - addr_type, 0); + /* + * We can not remove the first address from the list because + * dev->dev_addr points to that. 
+ */ + ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list); + if (ha->addr == dev->dev_addr && ha->refcount == 1) + return -ENOENT; + + err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len, + addr_type); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return err; @@ -3680,9 +3753,9 @@ int dev_addr_add_multiple(struct net_device *to_dev, if (from_dev->addr_len != to_dev->addr_len) return -EINVAL; - err = __hw_addr_add_multiple_ii(&to_dev->dev_addr_list, - &from_dev->dev_addr_list, - to_dev->addr_len, addr_type, 0); + err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL, + &from_dev->dev_addr_list, + to_dev->addr_len, addr_type); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); return err; @@ -3707,9 +3780,9 @@ int dev_addr_del_multiple(struct net_device *to_dev, if (from_dev->addr_len != to_dev->addr_len) return -EINVAL; - __hw_addr_del_multiple_ii(&to_dev->dev_addr_list, - &from_dev->dev_addr_list, - to_dev->addr_len, addr_type, 0); + __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL, + &from_dev->dev_addr_list, + to_dev->addr_len, addr_type); call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); return 0; } @@ -3779,24 +3852,22 @@ int __dev_addr_add(struct dev_addr_list **list, int *count, * dev_unicast_delete - Release secondary unicast address. * @dev: device * @addr: address to delete - * @alen: length of @addr * * Release reference to a secondary unicast address and remove it * from the device if the reference count drops to zero. * * The caller must hold the rtnl_mutex. */ -int dev_unicast_delete(struct net_device *dev, void *addr, int alen) +int dev_unicast_delete(struct net_device *dev, void *addr) { int err; ASSERT_RTNL(); - netif_addr_lock_bh(dev); - err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0); + err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr, + dev->addr_len, NETDEV_HW_ADDR_T_UNICAST); if (!err) __dev_set_rx_mode(dev); - netif_addr_unlock_bh(dev); return err; } EXPORT_SYMBOL(dev_unicast_delete); @@ -3805,24 +3876,22 @@ EXPORT_SYMBOL(dev_unicast_delete); * dev_unicast_add - add a secondary unicast address * @dev: device * @addr: address to add - * @alen: length of @addr * * Add a secondary unicast address to the device or increase * the reference count if it already exists. * * The caller must hold the rtnl_mutex. */ -int dev_unicast_add(struct net_device *dev, void *addr, int alen) +int dev_unicast_add(struct net_device *dev, void *addr) { int err; ASSERT_RTNL(); - netif_addr_lock_bh(dev); - err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0); + err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr, + dev->addr_len, NETDEV_HW_ADDR_T_UNICAST); if (!err) __dev_set_rx_mode(dev); - netif_addr_unlock_bh(dev); return err; } EXPORT_SYMBOL(dev_unicast_add); @@ -3879,8 +3948,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, * @from: source device * * Add newly added addresses to the destination device and release - * addresses that have no users left. The source device must be - * locked by netif_tx_lock_bh. + * addresses that have no users left. * * This function is intended to be called from the dev->set_rx_mode * function of layered software devices. 
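The dev_unicast_add()/dev_unicast_delete() conversion above moves the secondary unicast addresses onto the same refcounted __hw_addr_* list that dev_addr_list uses: adding an address that is already present only bumps a reference count, and an entry is only freed once its last reference is dropped. As a rough standalone illustration of that add/del behaviour, here is a minimal userspace sketch (a fixed-size array stands in for the kernel's list_head/RCU machinery; every struct and function name below is invented for the example and is not a kernel API):

/* Toy refcounted hardware-address list, mirroring the semantics of the
 * __hw_addr_add()/__hw_addr_del() helpers in the patch above. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ADDR_LEN 6
#define MAX_ADDRS 16

struct hw_addr {
	unsigned char addr[ADDR_LEN];
	int refcount;
	bool used;
};

struct addr_list {
	struct hw_addr entries[MAX_ADDRS];
};

/* Add an address, or just bump the refcount if it is already present
 * (the duplicate check added to __hw_addr_add plays the same role). */
static int addr_add(struct addr_list *l, const unsigned char *addr)
{
	int i, free_slot = -1;

	for (i = 0; i < MAX_ADDRS; i++) {
		if (l->entries[i].used &&
		    !memcmp(l->entries[i].addr, addr, ADDR_LEN)) {
			l->entries[i].refcount++;
			return 0;
		}
		if (!l->entries[i].used && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return -1;	/* list full */
	memcpy(l->entries[free_slot].addr, addr, ADDR_LEN);
	l->entries[free_slot].refcount = 1;
	l->entries[free_slot].used = true;
	return 0;
}

/* Drop one reference; release the entry only when the count hits zero. */
static int addr_del(struct addr_list *l, const unsigned char *addr)
{
	int i;

	for (i = 0; i < MAX_ADDRS; i++) {
		if (l->entries[i].used &&
		    !memcmp(l->entries[i].addr, addr, ADDR_LEN)) {
			if (--l->entries[i].refcount == 0)
				l->entries[i].used = false;
			return 0;
		}
	}
	return -1;	/* not found */
}

int main(void)
{
	struct addr_list uc = { 0 };
	unsigned char mac[ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	addr_add(&uc, mac);	/* refcount 1, entry created */
	addr_add(&uc, mac);	/* duplicate: refcount 2, no new entry */
	addr_del(&uc, mac);	/* refcount 1, still present */
	addr_del(&uc, mac);	/* refcount 0, entry released */
	printf("entry still used: %d\n", uc.entries[0].used);
	return 0;
}

The duplicate check before allocation is what lets a sync helper (__hw_addr_sync in the hunk above) be called repeatedly without growing the destination list.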
@@ -3889,12 +3957,15 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from) { int err = 0; - netif_addr_lock_bh(to); - err = __dev_addr_sync(&to->uc_list, &to->uc_count, - &from->uc_list, &from->uc_count); + ASSERT_RTNL(); + + if (to->addr_len != from->addr_len) + return -EINVAL; + + err = __hw_addr_sync(&to->uc_list, &to->uc_count, + &from->uc_list, &from->uc_count, to->addr_len); if (!err) __dev_set_rx_mode(to); - netif_addr_unlock_bh(to); return err; } EXPORT_SYMBOL(dev_unicast_sync); @@ -3910,18 +3981,33 @@ EXPORT_SYMBOL(dev_unicast_sync); */ void dev_unicast_unsync(struct net_device *to, struct net_device *from) { - netif_addr_lock_bh(from); - netif_addr_lock(to); + ASSERT_RTNL(); - __dev_addr_unsync(&to->uc_list, &to->uc_count, - &from->uc_list, &from->uc_count); - __dev_set_rx_mode(to); + if (to->addr_len != from->addr_len) + return; - netif_addr_unlock(to); - netif_addr_unlock_bh(from); + __hw_addr_unsync(&to->uc_list, &to->uc_count, + &from->uc_list, &from->uc_count, to->addr_len); + __dev_set_rx_mode(to); } EXPORT_SYMBOL(dev_unicast_unsync); +static void dev_unicast_flush(struct net_device *dev) +{ + /* rtnl_mutex must be held here */ + + __hw_addr_flush(&dev->uc_list); + dev->uc_count = 0; +} + +static void dev_unicast_init(struct net_device *dev) +{ + /* rtnl_mutex must be held here */ + + INIT_LIST_HEAD(&dev->uc_list); +} + + static void __dev_addr_discard(struct dev_addr_list **list) { struct dev_addr_list *tmp; @@ -3940,9 +4026,6 @@ static void dev_addr_discard(struct net_device *dev) { netif_addr_lock_bh(dev); - __dev_addr_discard(&dev->uc_list); - dev->uc_count = 0; - __dev_addr_discard(&dev->mc_list); dev->mc_count = 0; @@ -4535,6 +4618,7 @@ static void rollback_registered(struct net_device *dev) /* * Flush the unicast and multicast chains */ + dev_unicast_flush(dev); dev_addr_discard(dev); if (dev->netdev_ops->ndo_uninit) @@ -4988,18 +5072,18 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, struct netdev_queue *tx; struct net_device *dev; size_t alloc_size; - void *p; + struct net_device *p; BUG_ON(strlen(name) >= sizeof(dev->name)); alloc_size = sizeof(struct net_device); if (sizeof_priv) { /* ensure 32-byte alignment of private area */ - alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; + alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); alloc_size += sizeof_priv; } /* ensure 32-byte alignment of whole construct */ - alloc_size += NETDEV_ALIGN_CONST; + alloc_size += NETDEV_ALIGN - 1; p = kzalloc(alloc_size, GFP_KERNEL); if (!p) { @@ -5014,13 +5098,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, goto free_p; } - dev = (struct net_device *) - (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); + dev = PTR_ALIGN(p, NETDEV_ALIGN); dev->padded = (char *)dev - (char *)p; if (dev_addr_init(dev)) goto free_tx; + dev_unicast_init(dev); + dev_net_set(dev, &init_net); dev->_tx = tx; @@ -5224,6 +5309,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char /* * Flush the unicast and multicast chains */ + dev_unicast_flush(dev); dev_addr_discard(dev); netdev_unregister_kobject(dev); diff --git a/net/core/iovec.c b/net/core/iovec.c index 40a76ce19d9..16ad45d4882 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c @@ -112,9 +112,9 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, continue; } copy = min_t(unsigned int, iov->iov_len - offset, len); - offset = 0; - if (copy_to_user(iov->iov_base, kdata, copy)) + if (copy_to_user(iov->iov_base + 
offset, kdata, copy)) return -EFAULT; + offset = 0; kdata += copy; len -= copy; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index a1cbce7fdae..163b4f5b036 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -771,6 +771,28 @@ static __inline__ int neigh_max_probes(struct neighbour *n) p->ucast_probes + p->app_probes + p->mcast_probes); } +static void neigh_invalidate(struct neighbour *neigh) +{ + struct sk_buff *skb; + + NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed); + NEIGH_PRINTK2("neigh %p is failed.\n", neigh); + neigh->updated = jiffies; + + /* It is very thin place. report_unreachable is very complicated + routine. Particularly, it can hit the same neighbour entry! + + So that, we try to be accurate and avoid dead loop. --ANK + */ + while (neigh->nud_state == NUD_FAILED && + (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { + write_unlock(&neigh->lock); + neigh->ops->error_report(neigh, skb); + write_lock(&neigh->lock); + } + skb_queue_purge(&neigh->arp_queue); +} + /* Called when a timer expires for a neighbour entry. */ static void neigh_timer_handler(unsigned long arg) @@ -835,26 +857,9 @@ static void neigh_timer_handler(unsigned long arg) if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) && atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) { - struct sk_buff *skb; - neigh->nud_state = NUD_FAILED; - neigh->updated = jiffies; notify = 1; - NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed); - NEIGH_PRINTK2("neigh %p is failed.\n", neigh); - - /* It is very thin place. report_unreachable is very complicated - routine. Particularly, it can hit the same neighbour entry! - - So that, we try to be accurate and avoid dead loop. --ANK - */ - while (neigh->nud_state == NUD_FAILED && - (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { - write_unlock(&neigh->lock); - neigh->ops->error_report(neigh, skb); - write_lock(&neigh->lock); - } - skb_queue_purge(&neigh->arp_queue); + neigh_invalidate(neigh); } if (neigh->nud_state & NUD_IN_TIMER) { @@ -1001,6 +1006,11 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, neigh->nud_state = new; err = 0; notify = old & NUD_VALID; + if ((old & (NUD_INCOMPLETE | NUD_PROBE)) && + (new & NUD_FAILED)) { + neigh_invalidate(neigh); + notify = 1; + } goto out; } @@ -1088,8 +1098,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, struct neighbour *n1 = neigh; write_unlock_bh(&neigh->lock); /* On shaper/eql skb->dst->neighbour != neigh :( */ - if (skb->dst && skb->dst->neighbour) - n1 = skb->dst->neighbour; + if (skb_dst(skb) && skb_dst(skb)->neighbour) + n1 = skb_dst(skb)->neighbour; n1->output(skb); write_lock_bh(&neigh->lock); } @@ -1182,7 +1192,7 @@ EXPORT_SYMBOL(neigh_compat_output); int neigh_resolve_output(struct sk_buff *skb) { - struct dst_entry *dst = skb->dst; + struct dst_entry *dst = skb_dst(skb); struct neighbour *neigh; int rc = 0; @@ -1229,7 +1239,7 @@ EXPORT_SYMBOL(neigh_resolve_output); int neigh_connected_output(struct sk_buff *skb) { int err; - struct dst_entry *dst = skb->dst; + struct dst_entry *dst = skb_dst(skb); struct neighbour *neigh = dst->neighbour; struct net_device *dev = neigh->dev; @@ -1298,8 +1308,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, if (time_before(tbl->proxy_timer.expires, sched_next)) sched_next = tbl->proxy_timer.expires; } - dst_release(skb->dst); - skb->dst = NULL; + skb_dst_drop(skb); dev_hold(skb->dev); __skb_queue_tail(&tbl->proxy_queue, skb); mod_timer(&tbl->proxy_timer, sched_next); diff --git 
a/net/core/pktgen.c b/net/core/pktgen.c index b8ccd3c88d6..19b8c20e98a 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3691,8 +3691,7 @@ out1: #ifdef CONFIG_XFRM free_SAs(pkt_dev); #endif - if (pkt_dev->flows) - vfree(pkt_dev->flows); + vfree(pkt_dev->flows); kfree(pkt_dev); return err; } @@ -3791,8 +3790,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, #ifdef CONFIG_XFRM free_SAs(pkt_dev); #endif - if (pkt_dev->flows) - vfree(pkt_dev->flows); + vfree(pkt_dev->flows); kfree(pkt_dev); return 0; } diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c index 86234923a3b..79687dfd695 100644 --- a/net/core/skb_dma_map.c +++ b/net/core/skb_dma_map.c @@ -20,7 +20,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb, if (dma_mapping_error(dev, map)) goto out_err; - sp->dma_maps[0] = map; + sp->dma_head = map; for (i = 0; i < sp->nr_frags; i++) { skb_frag_t *fp = &sp->frags[i]; @@ -28,9 +28,8 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb, fp->size, dir); if (dma_mapping_error(dev, map)) goto unwind; - sp->dma_maps[i + 1] = map; + sp->dma_maps[i] = map; } - sp->num_dma_maps = i + 1; return 0; @@ -38,10 +37,10 @@ unwind: while (--i >= 0) { skb_frag_t *fp = &sp->frags[i]; - dma_unmap_page(dev, sp->dma_maps[i + 1], + dma_unmap_page(dev, sp->dma_maps[i], fp->size, dir); } - dma_unmap_single(dev, sp->dma_maps[0], + dma_unmap_single(dev, sp->dma_head, skb_headlen(skb), dir); out_err: return -ENOMEM; @@ -54,12 +53,12 @@ void skb_dma_unmap(struct device *dev, struct sk_buff *skb, struct skb_shared_info *sp = skb_shinfo(skb); int i; - dma_unmap_single(dev, sp->dma_maps[0], + dma_unmap_single(dev, sp->dma_head, skb_headlen(skb), dir); for (i = 0; i < sp->nr_frags; i++) { skb_frag_t *fp = &sp->frags[i]; - dma_unmap_page(dev, sp->dma_maps[i + 1], + dma_unmap_page(dev, sp->dma_maps[i], fp->size, dir); } } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8e815e685f2..b94d777e3eb 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -210,7 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, shinfo->gso_type = 0; shinfo->ip6_frag_id = 0; shinfo->tx_flags.flags = 0; - shinfo->frag_list = NULL; + skb_frag_list_init(skb); memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); if (fclone) { @@ -323,7 +323,7 @@ static void skb_clone_fraglist(struct sk_buff *skb) { struct sk_buff *list; - for (list = skb_shinfo(skb)->frag_list; list; list = list->next) + skb_walk_frags(skb, list) skb_get(list); } @@ -338,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb) put_page(skb_shinfo(skb)->frags[i].page); } - if (skb_shinfo(skb)->frag_list) + if (skb_has_frags(skb)) skb_drop_fraglist(skb); kfree(skb->head); @@ -381,7 +381,7 @@ static void kfree_skbmem(struct sk_buff *skb) static void skb_release_head_state(struct sk_buff *skb) { - dst_release(skb->dst); + skb_dst_drop(skb); #ifdef CONFIG_XFRM secpath_put(skb->sp); #endif @@ -503,7 +503,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) shinfo->gso_type = 0; shinfo->ip6_frag_id = 0; shinfo->tx_flags.flags = 0; - shinfo->frag_list = NULL; + skb_frag_list_init(skb); memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); memset(skb, 0, offsetof(struct sk_buff, tail)); @@ -521,7 +521,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->transport_header = old->transport_header; new->network_header = old->network_header; new->mac_header = old->mac_header; - new->dst = dst_clone(old->dst); + skb_dst_set(new, dst_clone(skb_dst(old))); #ifdef 
CONFIG_XFRM new->sp = secpath_get(old->sp); #endif @@ -552,7 +552,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->vlan_tci = old->vlan_tci; #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) new->do_not_encrypt = old->do_not_encrypt; - new->requeue = old->requeue; #endif skb_copy_secmark(new, old); @@ -758,7 +757,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) skb_shinfo(n)->nr_frags = i; } - if (skb_shinfo(skb)->frag_list) { + if (skb_has_frags(skb)) { skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; skb_clone_fraglist(n); } @@ -821,7 +820,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) get_page(skb_shinfo(skb)->frags[i].page); - if (skb_shinfo(skb)->frag_list) + if (skb_has_frags(skb)) skb_clone_fraglist(skb); skb_release_data(skb); @@ -1093,7 +1092,7 @@ drop_pages: for (; i < nfrags; i++) put_page(skb_shinfo(skb)->frags[i].page); - if (skb_shinfo(skb)->frag_list) + if (skb_has_frags(skb)) skb_drop_fraglist(skb); goto done; } @@ -1188,7 +1187,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) /* Optimization: no fragments, no reasons to preestimate * size of pulled pages. Superb. */ - if (!skb_shinfo(skb)->frag_list) + if (!skb_has_frags(skb)) goto pull_pages; /* Estimate size of pulled pages. */ @@ -1285,8 +1284,9 @@ EXPORT_SYMBOL(__pskb_pull_tail); int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) { - int i, copy; int start = skb_headlen(skb); + struct sk_buff *frag_iter; + int i, copy; if (offset > (int)skb->len - len) goto fault; @@ -1328,28 +1328,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) start = end; } - if (skb_shinfo(skb)->frag_list) { - struct sk |
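The skb_copy_bits() hunk is cut off above, but it follows the same conversion as the datagram.c helpers at the start of the diff: the open-coded walk of skb_shinfo(skb)->frag_list becomes skb_walk_frags(skb, frag_iter), and the body recurses into each fragment skb after the linear head and page frags have been handled. Below is a minimal userspace sketch of that walk-and-recurse copy, using an invented chained-buffer struct rather than the kernel's sk_buff (all names are illustrative only):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for an skb: linear data plus a chain of fragment
 * buffers (roughly the role of skb_shinfo(skb)->frag_list). */
struct buf {
	const char *data;	/* "linear" part */
	size_t len;		/* length of the linear part */
	struct buf *frag_list;	/* chained fragment buffers */
	struct buf *next;	/* next fragment in the chain */
};

/* Iterate the fragment chain, in the spirit of skb_walk_frags(). */
#define buf_walk_frags(b, iter) \
	for (iter = (b)->frag_list; iter; iter = iter->next)

/* Total length of a buffer including its fragment chain, playing the
 * role of skb->len in the kernel code. */
static size_t buf_total_len(const struct buf *b)
{
	size_t total = b->len;
	struct buf *iter;

	buf_walk_frags(b, iter)
		total += buf_total_len(iter);
	return total;
}

/* Copy 'len' bytes starting at 'offset' out of the buffer and its
 * fragment chain, recursing into fragments the same way
 * skb_copy_bits()/skb_copy_datagram_iovec() do in the patch. */
static int buf_copy_bits(const struct buf *b, size_t offset,
			 char *to, size_t len)
{
	size_t start = b->len;
	struct buf *frag_iter;
	size_t copy;

	/* Copy from the linear part first. */
	if (offset < start) {
		copy = start - offset;
		if (copy > len)
			copy = len;
		memcpy(to, b->data + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	/* Then walk the fragment chain and recurse. */
	buf_walk_frags(b, frag_iter) {
		size_t end = start + buf_total_len(frag_iter);

		if (offset < end) {
			copy = end - offset;
			if (copy > len)
				copy = len;
			if (buf_copy_bits(frag_iter, offset - start, to, copy))
				return -1;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}
	return len ? -1 : 0;	/* -1: request ran past the end of the data */
}

int main(void)
{
	struct buf frag = { .data = "world", .len = 5 };
	struct buf head = { .data = "hello ", .len = 6, .frag_list = &frag };
	char out[12] = { 0 };

	if (!buf_copy_bits(&head, 0, out, 11))
		printf("%s\n", out);	/* prints "hello world" */
	return 0;
}

Because the recursion treats each fragment exactly like a top-level buffer, switching the loop to an iterator macro (skb_walk_frags in the patch) changes only the iteration syntax, not the copy logic.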