Diffstat (limited to 'net/core')
-rw-r--r--  net/core/Makefile            |    3
-rw-r--r--  net/core/dev.c               |  562
-rw-r--r--  net/core/dev_addr_lists.c    |  115
-rw-r--r--  net/core/dev_ioctl.c         |    2
-rw-r--r--  net/core/flow_dissector.c    |    6
-rw-r--r--  net/core/neighbour.c         |  477
-rw-r--r--  net/core/net-sysfs.c         |   82
-rw-r--r--  net/core/net-sysfs.h         |    2
-rw-r--r--  net/core/netclassid_cgroup.c |  120
-rw-r--r--  net/core/netpoll.c           |    4
-rw-r--r--  net/core/netprio_cgroup.c    |    2
-rw-r--r--  net/core/pktgen.c            |   88
-rw-r--r--  net/core/rtnetlink.c         |  176
-rw-r--r--  net/core/skbuff.c            |  366
-rw-r--r--  net/core/sock.c              |   43
-rw-r--r--  net/core/stream.c            |    2
-rw-r--r--  net/core/sysctl_net_core.c   |    3
17 files changed, 1368 insertions(+), 685 deletions(-)
diff --git a/net/core/Makefile b/net/core/Makefile
index b33b996f5dd..9628c20acff 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -21,4 +21,5 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
obj-$(CONFIG_TRACEPOINTS) += net-traces.o
obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
-obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o
+obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
+obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 2e0c6a90f6f..3721db71635 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -147,6 +147,8 @@ struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly; /* Taps */
static struct list_head offload_base __read_mostly;
+static int netif_rx_internal(struct sk_buff *skb);
+
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
* semaphore.
@@ -480,7 +482,7 @@ EXPORT_SYMBOL(dev_add_offload);
* and must not be freed until after all the CPUs have gone
* through a quiescent state.
*/
-void __dev_remove_offload(struct packet_offload *po)
+static void __dev_remove_offload(struct packet_offload *po)
{
struct list_head *head = &offload_base;
struct packet_offload *po1;
@@ -498,7 +500,6 @@ void __dev_remove_offload(struct packet_offload *po)
out:
spin_unlock(&offload_lock);
}
-EXPORT_SYMBOL(__dev_remove_offload);
/**
* dev_remove_offload - remove packet offload handler
@@ -1118,6 +1119,8 @@ rollback:
write_seqcount_end(&devnet_rename_seq);
+ netdev_adjacent_rename_links(dev, oldname);
+
write_lock_bh(&dev_base_lock);
hlist_del_rcu(&dev->name_hlist);
write_unlock_bh(&dev_base_lock);
@@ -1137,6 +1140,7 @@ rollback:
err = ret;
write_seqcount_begin(&devnet_rename_seq);
memcpy(dev->name, oldname, IFNAMSIZ);
+ memcpy(oldname, newname, IFNAMSIZ);
goto rollback;
} else {
pr_err("%s: name change rollback failed: %d\n",
@@ -1566,14 +1570,14 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
* are as for raw_notifier_call_chain().
*/
-int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
- struct netdev_notifier_info *info)
+static int call_netdevice_notifiers_info(unsigned long val,
+ struct net_device *dev,
+ struct netdev_notifier_info *info)
{
ASSERT_RTNL();
netdev_notifier_info_init(info, dev);
return raw_notifier_call_chain(&netdev_chain, val, info);
}
-EXPORT_SYMBOL(call_netdevice_notifiers_info);
/**
* call_netdevice_notifiers - call all network notifier blocks
@@ -1699,7 +1703,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb_scrub_packet(skb, true);
skb->protocol = eth_type_trans(skb, dev);
- return netif_rx(skb);
+ return netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -2079,7 +2083,7 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
/**
* netif_set_real_num_rx_queues - set actual number of RX queues used
* @dev: Network device
@@ -2145,30 +2149,42 @@ void __netif_schedule(struct Qdisc *q)
}
EXPORT_SYMBOL(__netif_schedule);
-void dev_kfree_skb_irq(struct sk_buff *skb)
+struct dev_kfree_skb_cb {
+ enum skb_free_reason reason;
+};
+
+static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
- if (atomic_dec_and_test(&skb->users)) {
- struct softnet_data *sd;
- unsigned long flags;
+ return (struct dev_kfree_skb_cb *)skb->cb;
+}
+
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+{
+ unsigned long flags;
- local_irq_save(flags);
- sd = &__get_cpu_var(softnet_data);
- skb->next = sd->completion_queue;
- sd->completion_queue = skb;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
+ if (likely(atomic_read(&skb->users) == 1)) {
+ smp_rmb();
+ atomic_set(&skb->users, 0);
+ } else if (likely(!atomic_dec_and_test(&skb->users))) {
+ return;
}
+ get_kfree_skb_cb(skb)->reason = reason;
+ local_irq_save(flags);
+ skb->next = __this_cpu_read(softnet_data.completion_queue);
+ __this_cpu_write(softnet_data.completion_queue, skb);
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
}
-EXPORT_SYMBOL(dev_kfree_skb_irq);
+EXPORT_SYMBOL(__dev_kfree_skb_irq);
-void dev_kfree_skb_any(struct sk_buff *skb)
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
if (in_irq() || irqs_disabled())
- dev_kfree_skb_irq(skb);
+ __dev_kfree_skb_irq(skb, reason);
else
dev_kfree_skb(skb);
}
-EXPORT_SYMBOL(dev_kfree_skb_any);
+EXPORT_SYMBOL(__dev_kfree_skb_any);
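[The reworked free path tags every deferred skb with a free reason so that net_tx_action() can emit the matching tracepoint: consume_skb for packets that were transmitted successfully, kfree_skb for real drops. A minimal sketch of how a driver's TX paths might use the new entry points; SKB_REASON_DROPPED is assumed from the netdevice.h side of this series, which is outside this diffstat (SKB_REASON_CONSUMED is visible in the net_tx_action() hunk below):

	/* sketch: reason-tagged freeing from a driver's TX paths */
	static void example_tx_complete(struct sk_buff *skb)
	{
		/* packet made it out: shows up as consume_skb in traces */
		__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
	}

	static void example_tx_error(struct sk_buff *skb)
	{
		/* packet was dropped: still traced via kfree_skb */
		__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
	}
]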
/**
@@ -2442,13 +2458,8 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
{
struct dev_gso_cb *cb;
- do {
- struct sk_buff *nskb = skb->next;
-
- skb->next = nskb->next;
- nskb->next = NULL;
- kfree_skb(nskb);
- } while (skb->next);
+ kfree_skb_list(skb->next);
+ skb->next = NULL;
cb = DEV_GSO_CB(skb);
if (cb->destructor)
@@ -2523,21 +2534,6 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_skb_features);
-/*
- * Returns true if either:
- * 1. skb has frag_list and the device doesn't support FRAGLIST, or
- * 2. skb is fragmented and the device does not support SG.
- */
-static inline int skb_needs_linearize(struct sk_buff *skb,
- netdev_features_t features)
-{
- return skb_is_nonlinear(skb) &&
- ((skb_has_frag_list(skb) &&
- !(features & NETIF_F_FRAGLIST)) ||
- (skb_shinfo(skb)->nr_frags &&
- !(features & NETIF_F_SG)));
-}
-
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
@@ -2605,8 +2601,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
dev_queue_xmit_nit(skb, dev);
skb_len = skb->len;
- rc = ops->ndo_start_xmit(skb, dev);
-
+ trace_net_dev_start_xmit(skb, dev);
+ rc = ops->ndo_start_xmit(skb, dev);
trace_net_dev_xmit(skb, rc, dev, skb_len);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
@@ -2624,6 +2620,7 @@ gso:
dev_queue_xmit_nit(nskb, dev);
skb_len = nskb->len;
+ trace_net_dev_start_xmit(nskb, dev);
rc = ops->ndo_start_xmit(nskb, dev);
trace_net_dev_xmit(nskb, rc, dev, skb_len);
if (unlikely(rc != NETDEV_TX_OK)) {
@@ -2744,7 +2741,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc;
}
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
@@ -2781,8 +2778,9 @@ int dev_loopback_xmit(struct sk_buff *skb)
EXPORT_SYMBOL(dev_loopback_xmit);
/**
- * dev_queue_xmit - transmit a buffer
+ * __dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
+ * @accel_priv: private data used for L2 forwarding offload
*
* Queue a buffer for transmission to a network device. The caller must
* have set the device and priority and built the buffer before calling
@@ -3014,7 +3012,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
}
skb_reset_network_header(skb);
- if (!skb_get_rxhash(skb))
+ if (!skb_get_hash(skb))
goto done;
flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@ -3159,7 +3157,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
rcu_read_lock();
fl = rcu_dereference(sd->flow_limit);
if (fl) {
- new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
+ new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
old_flow = fl->history[fl->history_head];
fl->history[fl->history_head] = new_flow;
@@ -3227,22 +3225,7 @@ enqueue:
return NET_RX_DROP;
}
-/**
- * netif_rx - post buffer to the network code
- * @skb: buffer to post
- *
- * This function receives a packet from a device driver and queues it for
- * the upper (protocol) levels to process. It always succeeds. The buffer
- * may be dropped during processing for congestion control or by the
- * protocol layers.
- *
- * return values:
- * NET_RX_SUCCESS (no congestion)
- * NET_RX_DROP (packet was dropped)
- *
- */
-
-int netif_rx(struct sk_buff *skb)
+static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
@@ -3278,14 +3261,38 @@ int netif_rx(struct sk_buff *skb)
}
return ret;
}
+
+/**
+ * netif_rx - post buffer to the network code
+ * @skb: buffer to post
+ *
+ * This function receives a packet from a device driver and queues it for
+ * the upper (protocol) levels to process. It always succeeds. The buffer
+ * may be dropped during processing for congestion control or by the
+ * protocol layers.
+ *
+ * return values:
+ * NET_RX_SUCCESS (no congestion)
+ * NET_RX_DROP (packet was dropped)
+ *
+ */
+
+int netif_rx(struct sk_buff *skb)
+{
+ trace_netif_rx_entry(skb);
+
+ return netif_rx_internal(skb);
+}
EXPORT_SYMBOL(netif_rx);
int netif_rx_ni(struct sk_buff *skb)
{
int err;
+ trace_netif_rx_ni_entry(skb);
+
preempt_disable();
- err = netif_rx(skb);
+ err = netif_rx_internal(skb);
if (local_softirq_pending())
do_softirq();
preempt_enable();
@@ -3311,7 +3318,10 @@ static void net_tx_action(struct softirq_action *h)
clist = clist->next;
WARN_ON(atomic_read(&skb->users));
- trace_kfree_skb(skb, net_tx_action);
+ if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
+ trace_consume_skb(skb);
+ else
+ trace_kfree_skb(skb, net_tx_action);
__kfree_skb(skb);
}
}
@@ -3667,22 +3677,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
return ret;
}
-/**
- * netif_receive_skb - process receive buffer from network
- * @skb: buffer to process
- *
- * netif_receive_skb() is the main receive data processing function.
- * It always succeeds. The buffer may be dropped during processing
- * for congestion control or by the protocol layers.
- *
- * This function may only be called from softirq context and interrupts
- * should be enabled.
- *
- * Return values (usually ignored):
- * NET_RX_SUCCESS: no congestion
- * NET_RX_DROP: packet was dropped
- */
-int netif_receive_skb(struct sk_buff *skb)
+static int netif_receive_skb_internal(struct sk_buff *skb)
{
net_timestamp_check(netdev_tstamp_prequeue, skb);
@@ -3708,6 +3703,28 @@ int netif_receive_skb(struct sk_buff *skb)
#endif
return __netif_receive_skb(skb);
}
+
+/**
+ * netif_receive_skb - process receive buffer from network
+ * @skb: buffer to process
+ *
+ * netif_receive_skb() is the main receive data processing function.
+ * It always succeeds. The buffer may be dropped during processing
+ * for congestion control or by the protocol layers.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ *
+ * Return values (usually ignored):
+ * NET_RX_SUCCESS: no congestion
+ * NET_RX_DROP: packet was dropped
+ */
+int netif_receive_skb(struct sk_buff *skb)
+{
+ trace_netif_receive_skb_entry(skb);
+
+ return netif_receive_skb_internal(skb);
+}
EXPORT_SYMBOL(netif_receive_skb);
/* Network device is going away, flush any packets still pending
@@ -3757,7 +3774,7 @@ static int napi_gro_complete(struct sk_buff *skb)
if (ptype->type != type || !ptype->callbacks.gro_complete)
continue;
- err = ptype->callbacks.gro_complete(skb);
+ err = ptype->callbacks.gro_complete(skb, 0);
break;
}
rcu_read_unlock();
@@ -3769,7 +3786,7 @@ static int napi_gro_complete(struct sk_buff *skb)
}
out:
- return netif_receive_skb(skb);
+ return netif_receive_skb_internal(skb);
}
/* napi->gro_list contains packets ordered by age.
@@ -3805,10 +3822,18 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff *p;
unsigned int maclen = skb->dev->hard_header_len;
+ u32 hash = skb_get_hash_raw(skb);
for (p = napi->gro_list; p; p = p->next) {
unsigned long diffs;
+ NAPI_GRO_CB(p)->flush = 0;
+
+ if (hash != skb_get_hash_raw(p)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
diffs |= p->vlan_tci ^ skb->vlan_tci;
if (maclen == ETH_HLEN)
@@ -3819,7 +3844,23 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
skb_gro_mac_header(skb),
maclen);
NAPI_GRO_CB(p)->same_flow = !diffs;
- NAPI_GRO_CB(p)->flush = 0;
+ }
+}
+
+static void skb_gro_reset_offset(struct sk_buff *skb)
+{
+ const struct skb_shared_info *pinfo = skb_shinfo(skb);
+ const skb_frag_t *frag0 = &pinfo->frags[0];
+
+ NAPI_GRO_CB(skb)->data_offset = 0;
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+
+ if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
+ pinfo->nr_frags &&
+ !PageHighMem(skb_frag_page(frag0))) {
+ NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+ NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
}
}
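[Moving skb_gro_reset_offset() into dev_gro_receive() primes the frag0 fast path for every GRO entry point, so protocol gro_receive handlers can peek at headers that sit in the first page fragment without pulling them into the linear area. The typical peek pattern uses the same skb_gro_header_* helpers that appear in the napi_frags_skb() code removed further down (hedged sketch; hdr and off stand in for a handler's own header type and offset):

	/* sketch: header peek via the frag0 fast path primed above */
	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*hdr);
	hdr  = skb_gro_header_fast(skb, off);	/* frag0 + offset, no pull */
	if (skb_gro_header_hard(skb, hlen)) {
		hdr = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!hdr))
			goto out;		/* header unavailable */
	}
]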
@@ -3838,7 +3879,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
if (skb_is_gso(skb) || skb_has_frag_list(skb))
goto normal;
+ skb_gro_reset_offset(skb);
gro_list_prepare(napi, skb);
+ NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
@@ -3850,6 +3893,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->same_flow = 0;
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
+ NAPI_GRO_CB(skb)->udp_mark = 0;
pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
break;
@@ -3874,10 +3918,23 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
if (same_flow)
goto ok;
- if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
+ if (NAPI_GRO_CB(skb)->flush)
goto normal;
- napi->gro_count++;
+ if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
+ struct sk_buff *nskb = napi->gro_list;
+
+ /* locate the end of the list to select the 'oldest' flow */
+ while (nskb->next) {
+ pp = &nskb->next;
+ nskb = *pp;
+ }
+ *pp = NULL;
+ nskb->next = NULL;
+ napi_gro_complete(nskb);
+ } else {
+ napi->gro_count++;
+ }
NAPI_GRO_CB(skb)->count = 1;
NAPI_GRO_CB(skb)->age = jiffies;
skb_shinfo(skb)->gso_size = skb_gro_len(skb);
@@ -3915,12 +3972,39 @@ normal:
goto pull;
}
+struct packet_offload *gro_find_receive_by_type(__be16 type)
+{
+ struct list_head *offload_head = &offload_base;
+ struct packet_offload *ptype;
+
+ list_for_each_entry_rcu(ptype, offload_head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_receive)
+ continue;
+ return ptype;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(gro_find_receive_by_type);
+
+struct packet_offload *gro_find_complete_by_type(__be16 type)
+{
+ struct list_head *offload_head = &offload_base;
+ struct packet_offload *ptype;
+
+ list_for_each_entry_rcu(ptype, offload_head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_complete)
+ continue;
+ return ptype;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(gro_find_complete_by_type);
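[These two lookups give encapsulation GRO paths (the new udp_mark field above points the same way) a shared way to dispatch on an inner protocol type instead of open-coding the offload_base walk. A hedged sketch of a tunnel gro_receive using the receive-side helper; example_inner_proto() and the handler itself are illustrative, not part of this diff:

	static struct sk_buff **example_tunnel_gro_receive(struct sk_buff **head,
							   struct sk_buff *skb)
	{
		struct packet_offload *ptype;
		struct sk_buff **pp = NULL;
		__be16 type = example_inner_proto(skb);	/* hypothetical */

		rcu_read_lock();
		ptype = gro_find_receive_by_type(type);
		if (ptype)
			pp = ptype->callbacks.gro_receive(head, skb);
		rcu_read_unlock();
		return pp;
	}
]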
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
switch (ret) {
case GRO_NORMAL:
- if (netif_receive_skb(skb))
+ if (netif_receive_skb_internal(skb))
ret = GRO_DROP;
break;
@@ -3943,26 +4027,9 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
return ret;
}
-static void skb_gro_reset_offset(struct sk_buff *skb)
-{
- const struct skb_shared_info *pinfo = skb_shinfo(skb);
- const skb_frag_t *frag0 = &pinfo->frags[0];
-
- NAPI_GRO_CB(skb)->data_offset = 0;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
-
- if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
- pinfo->nr_frags &&
- !PageHighMem(skb_frag_page(frag0))) {
- NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
- NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
- }
-}
-
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
- skb_gro_reset_offset(skb);
+ trace_napi_gro_receive_entry(skb);
return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
@@ -3986,8 +4053,7 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
if (!skb) {
skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
- if (skb)
- napi->skb = skb;
+ napi->skb = skb;
}
return skb;
}
@@ -3998,12 +4064,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
{
switch (ret) {
case GRO_NORMAL:
- case GRO_HELD:
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- if (ret == GRO_HELD)
- skb_gro_pull(skb, -ETH_HLEN);
- else if (netif_receive_skb(skb))
+ if (netif_receive_skb_internal(skb))
ret = GRO_DROP;
break;
@@ -4012,6 +4073,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
napi_reuse_skb(napi, skb);
break;
+ case GRO_HELD:
case GRO_MERGED:
break;
}
@@ -4022,36 +4084,15 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
- struct ethhdr *eth;
- unsigned int hlen;
- unsigned int off;
napi->skb = NULL;
- skb_reset_mac_header(skb);
- skb_gro_reset_offset(skb);
-
- off = skb_gro_offset(skb);
- hlen = off + sizeof(*eth);
- eth = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen)) {
- eth = skb_gro_header_slow(skb, hlen, off);
- if (unlikely(!eth)) {
- napi_reuse_skb(napi, skb);
- skb = NULL;
- goto out;
- }
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
+ napi_reuse_skb(napi, skb);
+ return NULL;
}
+ skb->protocol = eth_type_trans(skb, skb->dev);
- skb_gro_pull(skb, sizeof(*eth));
-
- /*
- * This works because the only protocols we care about don't require
- * special handling. We'll fix it up properly at the end.
- */
- skb->protocol = eth->h_proto;
-
-out:
return skb;
}
@@ -4062,12 +4103,14 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
if (!skb)
return GRO_DROP;
+ trace_napi_gro_frags_entry(skb);
+
return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
/*
- * net_rps_action sends any pending IPI's for rps.
+ * net_rps_action_and_irq_enable sends any pending IPIs for rps.
* Note: called with local irq disabled, but exits with local irq enabled.
*/
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
@@ -4272,17 +4315,10 @@ EXPORT_SYMBOL(netif_napi_add);
void netif_napi_del(struct napi_struct *napi)
{
- struct sk_buff *skb, *next;
-
list_del_init(&napi->dev_list);
napi_free_frags(napi);
- for (skb = napi->gro_list; skb; skb = next) {
- next = skb->next;
- skb->next = NULL;
- kfree_skb(skb);
- }
-
+ kfree_skb_list(napi->gro_list);
napi->gro_list = NULL;
napi->gro_count = 0;
}
@@ -4399,19 +4435,6 @@ struct netdev_adjacent {
struct rcu_head rcu;
};
-static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
- struct net_device *adj_dev,
- struct list_head *adj_list)
-{
- struct netdev_adjacent *adj;
-
- list_for_each_entry_rcu(adj, adj_list, list) {
- if (adj->dev == adj_dev)
- return adj;
- }
- return NULL;
-}
-
static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *adj_list)
@@ -4450,13 +4473,12 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
* Find out if a device is linked to an upper device and return true in case
* it is. The caller must hold the RTNL lock.
*/
-bool netdev_has_any_upper_dev(struct net_device *dev)
+static bool netdev_has_any_upper_dev(struct net_device *dev)
{
ASSERT_RTNL();
return !list_empty(&dev->all_adj_list.upper);
}
-EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
* netdev_master_upper_dev_get - Get master upper device
@@ -4576,6 +4598,27 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
/**
+ * netdev_lower_get_first_private_rcu - Get the first ->private from the
+ * lower neighbour list, RCU
+ * variant
+ * @dev: device
+ *
+ * Gets the first netdev_adjacent->private from the dev's lower neighbour
+ * list. The caller must hold RCU read lock.
+ */
+void *netdev_lower_get_first_private_rcu(struct net_device *dev)
+{
+ struct netdev_adjacent *lower;
+
+ lower = list_first_or_null_rcu(&dev->adj_list.lower,
+ struct netdev_adjacent, list);
+ if (lower)
+ return lower->private;
+ return NULL;
+}
+EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
+
+/**
* netdev_master_upper_dev_get_rcu - Get master upper device
* @dev: device
*
@@ -4594,13 +4637,36 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
+int netdev_adjacent_sysfs_add(struct net_device *dev,
+ struct net_device *adj_dev,
+ struct list_head *dev_list)
+{
+ char linkname[IFNAMSIZ+7];
+ sprintf(linkname, dev_list == &dev->adj_list.upper ?
+ "upper_%s" : "lower_%s", adj_dev->name);
+ return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
+ linkname);
+}
+void netdev_adjacent_sysfs_del(struct net_device *dev,
+ char *name,
+ struct list_head *dev_list)
+{
+ char linkname[IFNAMSIZ+7];
+ sprintf(linkname, dev_list == &dev->adj_list.upper ?
+ "upper_%s" : "lower_%s", name);
+ sysfs_remove_link(&(dev->dev.kobj), linkname);
+}
+
+#define netdev_adjacent_is_neigh_list(dev, dev_list) \
+ (dev_list == &dev->adj_list.upper || \
+ dev_list == &dev->adj_list.lower)
+
static int __netdev_adjacent_dev_insert(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *dev_list,
void *private, bool master)
{
struct netdev_adjacent *adj;
- char linkname[IFNAMSIZ+7];
int ret;
adj = __netdev_find_adj(dev, adj_dev, dev_list);
@@ -4623,16 +4689,8 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
pr_debug("dev_hold for %s, because of link added from %s to %s\n",
adj_dev->name, dev->name, adj_dev->name);
- if (dev_list == &dev->adj_list.lower) {
- sprintf(linkname, "lower_%s", adj_dev->name);
- ret = sysfs_create_link(&(dev->dev.kobj),
- &(adj_dev->dev.kobj), linkname);
- if (ret)
- goto free_adj;
- } else if (dev_list == &dev->adj_list.upper) {
- sprintf(linkname, "upper_%s", adj_dev->name);
- ret = sysfs_create_link(&(dev->dev.kobj),
- &(adj_dev->dev.kobj), linkname);
+ if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
+ ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
if (ret)
goto free_adj;
}
@@ -4652,14 +4710,8 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
return 0;
remove_symlinks:
- if (dev_list == &dev->adj_list.lower) {
- sprintf(linkname, "lower_%s", adj_dev->name);
- sysfs_remove_link(&(dev->dev.kobj), linkname);
- } else if (dev_list == &dev->adj_list.upper) {
- sprintf(linkname, "upper_%s", adj_dev->name);
- sysfs_remove_link(&(dev->dev.kobj), linkname);
- }
-
+ if (netdev_adjacent_is_neigh_list(dev, dev_list))
+ netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
kfree(adj);
dev_put(adj_dev);
@@ -4667,12 +4719,11 @@ free_adj:
return ret;
}
-void __netdev_adjacent_dev_remove(struct net_device *dev,
- struct net_device *adj_dev,
- struct list_head *dev_list)
+static void __netdev_adjacent_dev_remove(struct net_device *dev,
+ struct net_device *adj_dev,
+ struct list_head *dev_list)
{
struct netdev_adjacent *adj;
- char linkname[IFNAMSIZ+7];
adj = __netdev_find_adj(dev, adj_dev, dev_list);
@@ -4692,13 +4743,8 @@ void __netdev_adjacent_dev_remove(struct net_device *dev,
if (adj->master)
sysfs_remove_link(&(dev->dev.kobj), "master");
- if (dev_list == &dev->adj_list.lower) {
- sprintf(linkname, "lower_%s", adj_dev->name);
- sysfs_remove_link(&(dev->dev.kobj), linkname);
- } else if (dev_list == &dev->adj_list.upper) {
- sprintf(linkname, "upper_%s", adj_dev->name);
- sysfs_remove_link(&(dev->dev.kobj), linkname);
- }
+ if (netdev_adjacent_is_neigh_list(dev, dev_list))
+ netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
list_del_rcu(&adj->list);
pr_debug("dev_put for %s, because link removed from %s to %s\n",
@@ -4707,11 +4753,11 @@ void __netdev_adjacent_dev_remove(struct net_device *dev,
kfree_rcu(adj, rcu);
}
-int __netdev_adjacent_dev_link_lists(struct net_device *dev,
- struct net_device *upper_dev,
- struct list_head *up_list,
- struct list_head *down_list,
- void *private, bool master)
+static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+ struct net_device *upper_dev,
+ struct list_head *up_list,
+ struct list_head *down_list,
+ void *private, bool master)
{
int ret;
@@ -4730,8 +4776,8 @@ int __netdev_adjacent_dev_link_lists(struct net_device *dev,
return 0;
}
-int __netdev_adjacent_dev_link(struct net_device *dev,
- struct net_device *upper_dev)
+static int __netdev_adjacent_dev_link(struct net_device *dev,
+ struct net_device *upper_dev)
{
return __netdev_adjacent_dev_link_lists(dev, upper_dev,
&dev->all_adj_list.upper,
@@ -4739,26 +4785,26 @@ int __netdev_adjacent_dev_link(struct net_device *dev,
NULL, false);
}
-void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
- struct net_device *upper_dev,
- struct list_head *up_list,
- struct list_head *down_list)
+static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+ struct net_device *upper_dev,
+ struct list_head *up_list,
+ struct list_head *down_list)
{
__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
}
-void __netdev_adjacent_dev_unlink(struct net_device *dev,
- struct net_device *upper_dev)
+static void __netdev_adjacent_dev_unlink(struct net_device *dev,
+ struct net_device *upper_dev)
{
__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
&dev->all_adj_list.upper,
&upper_dev->all_adj_list.lower);
}
-int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
- struct net_device *upper_dev,
- void *private, bool master)
+static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ struct net_device *upper_dev,
+ void *private, bool master)
{
int ret = __netdev_adjacent_dev_link(dev, upper_dev);
@@ -4777,8 +4823,8 @@ int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
return 0;
}
-void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
- struct net_device *upper_dev)
+static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+ struct net_device *upper_dev)
{
__netdev_adjacent_dev_unlink(dev, upper_dev);
__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
@@ -4967,20 +5013,24 @@ void netdev_upper_dev_unlink(struct net_device *dev,
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
-void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
- struct net_device *lower_dev)
+void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
- struct netdev_adjacent *lower;
+ struct netdev_adjacent *iter;
- if (!lower_dev)
- return NULL;
- lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
- if (!lower)
- return NULL;
+ list_for_each_entry(iter, &dev->adj_list.upper, list) {
+ netdev_adjacent_sysfs_del(iter->dev, oldname,
+ &iter->dev->adj_list.lower);
+ netdev_adjacent_sysfs_add(iter->dev, dev,
+ &iter->dev->adj_list.lower);
+ }
- return lower->private;
+ list_for_each_entry(iter, &dev->adj_list.lower, list) {
+ netdev_adjacent_sysfs_del(iter->dev, oldname,
+ &iter->dev->adj_list.upper);
+ netdev_adjacent_sysfs_add(iter->dev, dev,
+ &iter->dev->adj_list.upper);
+ }
}
-EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev)
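[For orientation, the adjacency symlinks being renamed above follow the upper_<name>/lower_<name> pattern created by netdev_adjacent_sysfs_add(). An illustrative layout, with hypothetical device names:

	/* sketch: sysfs layout for bond0 with slave eth0
	 *
	 *   /sys/class/net/bond0/lower_eth0 -> ../eth0
	 *   /sys/class/net/eth0/upper_bond0 -> ../bond0
	 *
	 * after eth0 is renamed to lan0, netdev_adjacent_rename_links()
	 * (called from the dev_change_name() hunk near the top) drops
	 * lower_eth0 on bond0 and recreates it as lower_lan0.
	 */
]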
@@ -5314,6 +5364,17 @@ int dev_change_flags(struct net_device *dev, unsigned int flags)
}
EXPORT_SYMBOL(dev_change_flags);
+static int __dev_set_mtu(struct net_device *dev, int new_mtu)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (ops->ndo_change_mtu)
+ return ops->ndo_change_mtu(dev, new_mtu);
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
/**
* dev_set_mtu - Change maximum transfer unit
* @dev: device
@@ -5323,8 +5384,7 @@ EXPORT_SYMBOL(dev_change_flags);
*/
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
- const struct net_device_ops *ops = dev->netdev_ops;
- int err;
+ int err, orig_mtu;
if (new_mtu == dev->mtu)
return 0;
@@ -5336,14 +5396,25 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
if (!netif_device_present(dev))
return -ENODEV;
- err = 0;
- if (ops->ndo_change_mtu)
- err = ops->ndo_change_mtu(dev, new_mtu);
- else
- dev->mtu = new_mtu;
+ err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
+ err = notifier_to_errno(err);
+ if (err)
+ return err;
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ orig_mtu = dev->mtu;
+ err = __dev_set_mtu(dev, new_mtu);
+
+ if (!err) {
+ err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ err = notifier_to_errno(err);
+ if (err) {
+ /* setting mtu back and notifying everyone again,
+ * so that they have a chance to revert changes.
+ */
+ __dev_set_mtu(dev, orig_mtu);
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ }
+ }
return err;
}
EXPORT_SYMBOL(dev_set_mtu);
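[dev_set_mtu() now consults listeners twice: NETDEV_PRECHANGEMTU lets them veto before anything is touched, and a veto at NETDEV_CHANGEMTU makes the MTU get set back and re-notified. A hedged sketch of a notifier that rejects MTUs its consumer cannot handle; EXAMPLE_MAX_MTU is illustrative:

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		/* at NETDEV_CHANGEMTU time dev->mtu already holds the new
		 * value; returning an error makes dev_set_mtu() roll back
		 */
		if (event == NETDEV_CHANGEMTU && dev->mtu > EXAMPLE_MAX_MTU)
			return notifier_from_errno(-ERANGE);

		return NOTIFY_DONE;
	}
]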
@@ -5697,7 +5768,7 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
static int netif_alloc_rx_queues(struct net_device *dev)
{
unsigned int i, count = dev->num_rx_queues;
@@ -5836,13 +5907,8 @@ int register_netdevice(struct net_device *dev)
dev->features |= NETIF_F_SOFT_FEATURES;
dev->wanted_features = dev->features & dev->hw_features;
- /* Turn on no cache copy if HW is doing checksum */
if (!(dev->flags & IFF_LOOPBACK)) {
dev->hw_features |= NETIF_F_NOCACHE_COPY;
- if (dev->features & NETIF_F_ALL_CSUM) {
- dev->wanted_features |= NETIF_F_NOCACHE_COPY;
- dev->features |= NETIF_F_NOCACHE_COPY;
- }
}
/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@ -6247,7 +6313,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
return NULL;
}
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
if (rxqs < 1) {
pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
return NULL;
@@ -6303,7 +6369,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
if (netif_alloc_netdev_queues(dev))
goto free_all;
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
dev->num_rx_queues = rxqs;
dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
@@ -6323,7 +6389,7 @@ free_all:
free_pcpu:
free_percpu(dev->pcpu_refcnt);
netif_free_tx_queues(dev);
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
kfree(dev->_rx);
#endif
@@ -6348,7 +6414,7 @@ void free_netdev(struct net_device *dev)
release_net(dev_net(dev));
netif_free_tx_queues(dev);
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
kfree(dev->_rx);
#endif
@@ -6618,11 +6684,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
- netif_rx(skb);
+ netif_rx_internal(skb);
input_queue_head_incr(oldsd);
}
while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
- netif_rx(skb);
+ netif_rx_internal(skb);
input_queue_head_incr(oldsd);
}
@@ -6935,28 +7001,18 @@ static int __init net_dev_init(void)
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
- memset(sd, 0, sizeof(*sd));
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
- sd->completion_queue = NULL;
INIT_LIST_HEAD(&sd->poll_list);
- sd->output_queue = NULL;
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
sd->csd.func = rps_trigger_softirq;
sd->csd.info = sd;
- sd->csd.flags = 0;
sd->cpu = i;
#endif
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
- sd->backlog.gro_list = NULL;
- sd->backlog.gro_count = 0;
-
-#ifdef CONFIG_NET_FLOW_LIMIT
- sd->flow_limit = NULL;
-#endif
}
dev_boot_phase = 0;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index ec40a849fc4..329d5794e7d 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -38,7 +38,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
ha->type = addr_type;
ha->refcount = 1;
ha->global_use = global;
- ha->synced = sync;
+ ha->synced = sync ? 1 : 0;
ha->sync_cnt = 0;
list_add_tail_rcu(&ha->list, &list->list);
list->count++;
@@ -48,7 +48,8 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
const unsigned char *addr, int addr_len,
- unsigned char addr_type, bool global, bool sync)
+ unsigned char addr_type, bool global, bool sync,
+ int sync_count)
{
struct netdev_hw_addr *ha;
@@ -66,10 +67,10 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
ha->global_use = true;
}
if (sync) {
- if (ha->synced)
+ if (ha->synced && sync_count)
return -EEXIST;
else
- ha->synced = true;
+ ha->synced++;
}
ha->refcount++;
return 0;
@@ -84,7 +85,8 @@ static int __hw_addr_add(struct netdev_hw_addr_list *list,
const unsigned char *addr, int addr_len,
unsigned char addr_type)
{
- return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false);
+ return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
+ 0);
}
static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
@@ -101,7 +103,7 @@ static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
ha->global_use = false;
if (sync)
- ha->synced = false;
+ ha->synced--;
if (--ha->refcount)
return 0;
@@ -139,7 +141,7 @@ static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
int err;
err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
- false, true);
+ false, true, ha->sync_cnt);
if (err && err != -EEXIST)
return err;
@@ -186,47 +188,6 @@ static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
return err;
}
-int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type)
-{
- int err;
- struct netdev_hw_addr *ha, *ha2;
- unsigned char type;
-
- list_for_each_entry(ha, &from_list->list, list) {
- type = addr_type ? addr_type : ha->type;
- err = __hw_addr_add(to_list, ha->addr, addr_len, type);
- if (err)
- goto unroll;
- }
- return 0;
-
-unroll:
- list_for_each_entry(ha2, &from_list->list, list) {
- if (ha2 == ha)
- break;
- type = addr_type ? addr_type : ha2->type;
- __hw_addr_del(to_list, ha2->addr, addr_len, type);
- }
- return err;
-}
-EXPORT_SYMBOL(__hw_addr_add_multiple);
-
-void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type)
-{
- struct netdev_hw_addr *ha;
- unsigned char type;
-
- list_for_each_entry(ha, &from_list->list, list) {
- type = addr_type ? addr_type : ha->type;
- __hw_addr_del(to_list, ha->addr, addr_len, type);
- }
-}
-EXPORT_SYMBOL(__hw_addr_del_multiple);
-
/* This function only works where there is a strict 1-1 relationship
* between source and destination of the sync. If you ever need to
* sync addresses to more than 1 destination, you need to use
@@ -264,7 +225,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
}
EXPORT_SYMBOL(__hw_addr_unsync);
-void __hw_addr_flush(struct netdev_hw_addr_list *list)
+static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
struct netdev_hw_addr *ha, *tmp;
@@ -274,7 +235,6 @@ void __hw_addr_flush(struct netdev_hw_addr_list *list)
}
list->count = 0;
}
-EXPORT_SYMBOL(__hw_addr_flush);
void __hw_addr_init(struct netdev_hw_addr_list *list)
{
@@ -400,59 +360,6 @@ int dev_addr_del(struct net_device *dev, const unsigned char *addr,
}
EXPORT_SYMBOL(dev_addr_del);
-/**
- * dev_addr_add_multiple - Add device addresses from another device
- * @to_dev: device to which addresses will be added
- * @from_dev: device from which addresses will be added
- * @addr_type: address type - 0 means type will be used from from_dev
- *
- * Add device addresses of the one device to another.
- **
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_add_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type)
-{
- int err;
-
- ASSERT_RTNL();
-
- if (from_dev->addr_len != to_dev->addr_len)
- return -EINVAL;
- err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
- to_dev->addr_len, addr_type);
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
- return err;
-}
-EXPORT_SYMBOL(dev_addr_add_multiple);
-
-/**
- * dev_addr_del_multiple - Delete device addresses by another device
- * @to_dev: device where the addresses will be deleted
- * @from_dev: device supplying the addresses to be deleted
- * @addr_type: address type - 0 means type will be used from from_dev
- *
- * Deletes addresses in to device by the list of addresses in from device.
- *
- * The caller must hold the rtnl_mutex.
- */
-int dev_addr_del_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type)
-{
- ASSERT_RTNL();
-
- if (from_dev->addr_len != to_dev->addr_len)
- return -EINVAL;
- __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
- to_dev->addr_len, addr_type);
- call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
- return 0;
-}
-EXPORT_SYMBOL(dev_addr_del_multiple);
-
/*
* Unicast list handling functions
*/
@@ -676,7 +583,7 @@ static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
netif_addr_lock_bh(dev);
err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
- NETDEV_HW_ADDR_T_MULTICAST, global, false);
+ NETDEV_HW_ADDR_T_MULTICAST, global, false, 0);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
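[Turning ha->synced into a counter (with sync_cnt threaded through __hw_addr_add_ex()) is what allows an address to be synced more than once without a spurious -EEXIST. The consumer-facing pattern is unchanged; a stacked device typically mirrors its address lists onto the lower device from ndo_set_rx_mode() (hedged sketch, example_* names illustrative):

	static void example_set_rx_mode(struct net_device *vlan_dev)
	{
		struct net_device *real_dev = example_lower_dev(vlan_dev);

		/* strict 1:1 sync; refcounted, so repeat calls are safe */
		dev_uc_sync(real_dev, vlan_dev);
		dev_mc_sync(real_dev, vlan_dev);
	}
]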
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 5b7d0e1d066..cf999e09bcd 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -327,6 +327,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
cmd == SIOCBRADDIF ||
cmd == SIOCBRDELIF ||
cmd == SIOCSHWTSTAMP ||
+ cmd == SIOCGHWTSTAMP ||
cmd == SIOCWANDEV) {
err = -EOPNOTSUPP;
if (ops->ndo_do_ioctl) {
@@ -546,6 +547,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
*/
default:
if (cmd == SIOCWANDEV ||
+ cmd == SIOCGHWTSTAMP ||
(cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15)) {
dev_load(net, ifr.ifr_name);
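[SIOCGHWTSTAMP is the read-only counterpart to SIOCSHWTSTAMP: it reports the device's current hardware timestamping configuration without changing it, so the setting can be queried without the privileges the set ioctl demands. A hedged user-space sketch; struct hwtstamp_config comes from linux/net_tstamp.h, and drivers without support return -EOPNOTSUPP:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int example_get_hwtstamp(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0)
			return -1;

		return cfg.rx_filter;	/* current HWTSTAMP_FILTER_* */
	}
]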
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2fc5beaf578..87577d44755 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -202,12 +202,12 @@ static __always_inline u32 __flow_hash_1word(u32 a)
}
/*
- * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
+ * __skb_get_hash: calculate a flow hash based on src/dst addresses
* and src/dst port numbers. Sets rxhash in skb to non-zero hash value
* on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
* if hash is a canonical 4-tuple hash over transport ports.
*/
-void __skb_get_rxhash(struct sk_buff *skb)
+void __skb_get_hash(struct sk_buff *skb)
{
struct flow_keys keys;
u32 hash;
@@ -234,7 +234,7 @@ void __skb_get_rxhash(struct sk_buff *skb)
skb->rxhash = hash;
}
-EXPORT_SYMBOL(__skb_get_rxhash);
+EXPORT_SYMBOL(__skb_get_hash);
/*
* Returns a Tx hash based on the given packet descriptor a Tx queues' number
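[The rxhash → hash rename reflects that the cached value is no longer RX-specific. Callers such as get_rps_cpu() now go through skb_get_hash(), an inline wrapper assumed on the skbuff.h side of this series (not in this diffstat) that presumably computes once and caches, roughly:

	/* sketch: assumed caching wrapper around __skb_get_hash() */
	static inline __u32 skb_get_hash(struct sk_buff *skb)
	{
		if (!skb->l4_rxhash)		/* recompute unless a
						 * canonical l4 hash is
						 * already cached */
			__skb_get_hash(skb);	/* dissect, store in rxhash */

		return skb->rxhash;
	}
]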
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 932c6d7cf66..b9e9e0d3867 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -38,6 +38,8 @@
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
#define DEBUG
#define NEIGH_DEBUG 1
@@ -115,7 +117,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
unsigned long neigh_rand_reach_time(unsigned long base)
{
- return base ? (net_random() % base) + (base >> 1) : 0;
+ return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
@@ -497,7 +499,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
goto out_neigh_release;
}
- n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
+ n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
write_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
@@ -776,7 +778,7 @@ static void neigh_periodic_work(struct work_struct *work)
tbl->last_rand = jiffies;
for (p = &tbl->parms; p; p = p->next)
p->reachable_time =
- neigh_rand_reach_time(p->base_reachable_time);
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@ -799,7 +801,7 @@ static void neigh_periodic_work(struct work_struct *work)
if (atomic_read(&n->refcnt) == 1 &&
(state == NUD_FAILED ||
- time_after(jiffies, n->used + n->parms->gc_staletime))) {
+ time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
*np = n->next;
n->dead = 1;
write_unlock(&n->lock);
@@ -822,12 +824,12 @@ next_elt:
lockdep_is_held(&tbl->lock));
}
out:
- /* Cycle through all hash buckets every base_reachable_time/2 ticks.
- * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
- * base_reachable_time.
+ /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
+ * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
+ * BASE_REACHABLE_TIME.
*/
- schedule_delayed_work(&tbl->gc_work,
- tbl->parms.base_reachable_time >> 1);
+ queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
+ NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
write_unlock_bh(&tbl->lock);
}
@@ -835,8 +837,9 @@ static __inline__ int neigh_max_probes(struct neighbour *n)
{
struct neigh_parms *p = n->parms;
return (n->nud_state & NUD_PROBE) ?
- p->ucast_probes :
- p->ucast_probes + p->app_probes + p->mcast_probes;
+ NEIGH_VAR(p, UCAST_PROBES) :
+ NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
+ NEIGH_VAR(p, MCAST_PROBES);
}
static void neigh_invalidate(struct neighbour *neigh)
@@ -901,12 +904,13 @@ static void neigh_timer_handler(unsigned long arg)
neigh_dbg(2, "neigh %p is still alive\n", neigh);
next = neigh->confirmed + neigh->parms->reachable_time;
} else if (time_before_eq(now,
- neigh->used + neigh->parms->delay_probe_time)) {
+ neigh->used +
+ NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
neigh_dbg(2, "neigh %p is delayed\n", neigh);
neigh->nud_state = NUD_DELAY;
neigh->updated = jiffies;
neigh_suspect(neigh);
- next = now + neigh->parms->delay_probe_time;
+ next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
} else {
neigh_dbg(2, "neigh %p is suspected\n", neigh);
neigh->nud_state = NUD_STALE;
@@ -916,7 +920,8 @@ static void neigh_timer_handler(unsigned long arg)
}
} else if (state & NUD_DELAY) {
if (time_before_eq(now,
- neigh->confirmed + neigh->parms->delay_probe_time)) {
+ neigh->confirmed +
+ NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
neigh_dbg(2, "neigh %p is now reachable\n", neigh);
neigh->nud_state = NUD_REACHABLE;
neigh->updated = jiffies;
@@ -928,11 +933,11 @@ static void neigh_timer_handler(unsigned long arg)
neigh->nud_state = NUD_PROBE;
neigh->updated = jiffies;
atomic_set(&neigh->probes, 0);
- next = now + neigh->parms->retrans_time;
+ next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
}
} else {
/* NUD_PROBE|NUD_INCOMPLETE */
- next = now + neigh->parms->retrans_time;
+ next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
}
if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
@@ -973,13 +978,16 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
goto out_unlock_bh;
if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
- if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+ if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
+ NEIGH_VAR(neigh->parms, APP_PROBES)) {
unsigned long next, now = jiffies;
- atomic_set(&neigh->probes, neigh->parms->ucast_probes);
+ atomic_set(&neigh->probes,
+ NEIGH_VAR(neigh->parms, UCAST_PROBES));
neigh->nud_state = NUD_INCOMPLETE;
neigh->updated = now;
- next = now + max(neigh->parms->retrans_time, HZ/2);
+ next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+ HZ/2);
neigh_add_timer(neigh, next);
immediate_probe = true;
} else {
@@ -994,14 +1002,14 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
neigh_dbg(2, "neigh %p is delayed\n", neigh);
neigh->nud_state = NUD_DELAY;
neigh->updated = jiffies;
- neigh_add_timer(neigh,
- jiffies + neigh->parms->delay_probe_time);
+ neigh_add_timer(neigh, jiffies +
+ NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
}
if (neigh->nud_state == NUD_INCOMPLETE) {
if (skb) {
while (neigh->arp_queue_len_bytes + skb->truesize >
- neigh->parms->queue_len_bytes) {
+ NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
struct sk_buff *buff;
buff = __skb_dequeue(&neigh->arp_queue);
@@ -1171,7 +1179,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
neigh_update_hhs(neigh);
if (!(new & NUD_CONNECTED))
neigh->confirmed = jiffies -
- (neigh->parms->base_reachable_time << 1);
+ (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
notify = 1;
}
if (new == old)
@@ -1231,6 +1239,21 @@ out:
}
EXPORT_SYMBOL(neigh_update);
+/* Update the neigh to listen temporarily for probe responses, even if it is
+ * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
+ */
+void __neigh_set_probe_once(struct neighbour *neigh)
+{
+ neigh->updated = jiffies;
+ if (!(neigh->nud_state & NUD_FAILED))
+ return;
+ neigh->nud_state = NUD_PROBE;
+ atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+ neigh_add_timer(neigh,
+ jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
+}
+EXPORT_SYMBOL(__neigh_set_probe_once);
+
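[The comment above states the locking contract, so a caller arms the one-shot probe like this (hedged sketch; the real callers live in protocol code outside net/core):

	write_lock_bh(&neigh->lock);
	__neigh_set_probe_once(neigh);
	write_unlock_bh(&neigh->lock);
]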
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
struct net_device *dev)
@@ -1392,9 +1415,11 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb)
{
unsigned long now = jiffies;
- unsigned long sched_next = now + (net_random() % p->proxy_delay);
- if (tbl->proxy_queue.qlen > p->proxy_qlen) {
+ unsigned long sched_next = now + (prandom_u32() %
+ NEIGH_VAR(p, PROXY_DELAY));
+
+ if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
kfree_skb(skb);
return;
}
@@ -1441,7 +1466,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
p->tbl = tbl;
atomic_set(&p->refcnt, 1);
p->reachable_time =
- neigh_rand_reach_time(p->base_reachable_time);
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
dev_hold(dev);
p->dev = dev;
write_pnet(&p->net, hold_net(net));
@@ -1458,6 +1483,8 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
p->next = tbl->parms.next;
tbl->parms.next = p;
write_unlock_bh(&tbl->lock);
+
+ neigh_parms_data_state_cleanall(p);
}
return p;
}
@@ -1510,7 +1537,7 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl)
write_pnet(&tbl->parms.net, &init_net);
atomic_set(&tbl->parms.refcnt, 1);
tbl->parms.reachable_time =
- neigh_rand_reach_time(tbl->parms.base_reachable_time);
+ neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
tbl->stats = alloc_percpu(struct neigh_statistics);
if (!tbl->stats)
@@ -1538,7 +1565,8 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl)
rwlock_init(&tbl->lock);
INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
- schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
+ queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
+ tbl->parms.reachable_time);
setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
skb_queue_head_init_class(&tbl->proxy_queue,
&neigh_table_proxy_queue_class);
@@ -1778,24 +1806,32 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
if ((parms->dev &&
nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
- nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
+ nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
+ NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
/* approximative value for deprecated QUEUE_LEN (in packets) */
nla_put_u32(skb, NDTPA_QUEUE_LEN,
- parms->queue_len_bytes / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
- nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
- nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
- nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
- nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
+ NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
+ nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
+ nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
+ nla_put_u32(skb, NDTPA_UCAST_PROBES,
+ NEIGH_VAR(parms, UCAST_PROBES)) ||
+ nla_put_u32(skb, NDTPA_MCAST_PROBES,
+ NEIGH_VAR(parms, MCAST_PROBES)) ||
nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
- parms->base_reachable_time) ||
- nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
+ NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
+ nla_put_msecs(skb, NDTPA_GC_STALETIME,
+ NEIGH_VAR(parms, GC_STALETIME)) ||
nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
- parms->delay_probe_time) ||
- nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
- nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
- nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
- nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
+ NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
+ nla_put_msecs(skb, NDTPA_RETRANS_TIME,
+ NEIGH_VAR(parms, RETRANS_TIME)) ||
+ nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
+ NEIGH_VAR(parms, ANYCAST_DELAY)) ||
+ nla_put_msecs(skb, NDTPA_PROXY_DELAY,
+ NEIGH_VAR(parms, PROXY_DELAY)) ||
+ nla_put_msecs(skb, NDTPA_LOCKTIME,
+ NEIGH_VAR(parms, LOCKTIME)))
goto nla_put_failure;
return nla_nest_end(skb, nest);
@@ -2011,44 +2047,57 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
switch (i) {
case NDTPA_QUEUE_LEN:
- p->queue_len_bytes = nla_get_u32(tbp[i]) *
- SKB_TRUESIZE(ETH_FRAME_LEN);
+ NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
+ nla_get_u32(tbp[i]) *
+ SKB_TRUESIZE(ETH_FRAME_LEN));
break;
case NDTPA_QUEUE_LENBYTES:
- p->queue_len_bytes = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_PROXY_QLEN:
- p->proxy_qlen = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, PROXY_QLEN,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_APP_PROBES:
- p->app_probes = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, APP_PROBES,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_UCAST_PROBES:
- p->ucast_probes = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, UCAST_PROBES,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_MCAST_PROBES:
- p->mcast_probes = nla_get_u32(tbp[i]);
+ NEIGH_VAR_SET(p, MCAST_PROBES,
+ nla_get_u32(tbp[i]));
break;
case NDTPA_BASE_REACHABLE_TIME:
- p->base_reachable_time = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_GC_STALETIME:
- p->gc_staletime = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, GC_STALETIME,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_DELAY_PROBE_TIME:
- p->delay_probe_time = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_RETRANS_TIME:
- p->retrans_time = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, RETRANS_TIME,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_ANYCAST_DELAY:
- p->anycast_delay = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, ANYCAST_DELAY,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_PROXY_DELAY:
- p->proxy_delay = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, PROXY_DELAY,
+ nla_get_msecs(tbp[i]));
break;
case NDTPA_LOCKTIME:
- p->locktime = nla_get_msecs(tbp[i]);
+ NEIGH_VAR_SET(p, LOCKTIME,
+ nla_get_msecs(tbp[i]));
break;
}
}
@@ -2789,133 +2838,167 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
return ret;
}
-enum {
- NEIGH_VAR_MCAST_PROBE,
- NEIGH_VAR_UCAST_PROBE,
- NEIGH_VAR_APP_PROBE,
- NEIGH_VAR_RETRANS_TIME,
- NEIGH_VAR_BASE_REACHABLE_TIME,
- NEIGH_VAR_DELAY_PROBE_TIME,
- NEIGH_VAR_GC_STALETIME,
- NEIGH_VAR_QUEUE_LEN,
- NEIGH_VAR_QUEUE_LEN_BYTES,
- NEIGH_VAR_PROXY_QLEN,
- NEIGH_VAR_ANYCAST_DELAY,
- NEIGH_VAR_PROXY_DELAY,
- NEIGH_VAR_LOCKTIME,
- NEIGH_VAR_RETRANS_TIME_MS,
- NEIGH_VAR_BASE_REACHABLE_TIME_MS,
- NEIGH_VAR_GC_INTERVAL,
- NEIGH_VAR_GC_THRESH1,
- NEIGH_VAR_GC_THRESH2,
- NEIGH_VAR_GC_THRESH3,
- NEIGH_VAR_MAX
-};
+static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
+ int family)
+{
+ switch (family) {
+ case AF_INET:
+ return __in_dev_arp_parms_get_rcu(dev);
+ case AF_INET6:
+ return __in6_dev_nd_parms_get_rcu(dev);
+ }
+ return NULL;
+}
+
+static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
+ int index)
+{
+ struct net_device *dev;
+ int family = neigh_parms_family(p);
+
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
+ struct neigh_parms *dst_p =
+ neigh_get_dev_parms_rcu(dev, family);
+
+ if (dst_p && !test_bit(index, dst_p->data_state))
+ dst_p->data[index] = p->data[index];
+ }
+ rcu_read_unlock();
+}
+
+static void neigh_proc_update(struct ctl_table *ctl, int write)
+{
+ struct net_device *dev = ctl->extra1;
+ struct neigh_parms *p = ctl->extra2;
+ struct net *net = neigh_parms_net(p);
+ int index = (int *) ctl->data - p->data;
+
+ if (!write)
+ return;
+
+ set_bit(index, p->data_state);
+ if (!dev) /* NULL dev means this is default value */
+ neigh_copy_dflt_parms(net, p, index);
+}
+
+static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table tmp = *ctl;
+ int ret;
+
+ tmp.extra1 = &zero;
+ tmp.extra2 = &int_max;
+
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+
+int neigh_proc_dointvec(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+EXPORT_SYMBOL(neigh_proc_dointvec);
+
+int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
+
+static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+
+int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
+
+static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
+
+ neigh_proc_update(ctl, write);
+ return ret;
+}
+
+#define NEIGH_PARMS_DATA_OFFSET(index) \
+ (&((struct neigh_parms *) 0)->data[index])
+
+#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
+ [NEIGH_VAR_ ## attr] = { \
+ .procname = name, \
+ .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
+ .maxlen = sizeof(int), \
+ .mode = mval, \
+ .proc_handler = proc, \
+ }
+
+#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
+
+#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
+
+#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
+
+#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
+
+#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
+
+#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
+ NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
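[Everything above leans on the per-parms data[] array indexed by NEIGH_VAR_* (note NEIGH_PARMS_DATA_OFFSET storing a bare offset into .data, which neigh_proc_update() inverts to recover the index). The NEIGH_VAR()/NEIGH_VAR_SET() accessors used throughout the patch come from include/net/neighbour.h, outside this diffstat; they presumably reduce to:

	/* sketch: accessors assumed from include/net/neighbour.h */
	#define NEIGH_VAR(p, attr)	((p)->data[NEIGH_VAR_ ## attr])
	#define NEIGH_VAR_SET(p, attr, val) \
		(NEIGH_VAR(p, attr) = (val))

	/* usage, as seen in the hunks above */
	next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
]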
static struct neigh_sysctl_table {
struct ctl_table_header *sysctl_header;
struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
.neigh_vars = {
- [NEIGH_VAR_MCAST_PROBE] = {
- .procname = "mcast_solicit",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .extra2 = &int_max,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_UCAST_PROBE] = {
- .procname = "ucast_solicit",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .extra2 = &int_max,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_APP_PROBE] = {
- .procname = "app_solicit",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .extra2 = &int_max,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_RETRANS_TIME] = {
- .procname = "retrans_time",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_userhz_jiffies,
- },
- [NEIGH_VAR_BASE_REACHABLE_TIME] = {
- .procname = "base_reachable_time",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NEIGH_VAR_DELAY_PROBE_TIME] = {
- .procname = "delay_first_probe_time",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NEIGH_VAR_GC_STALETIME] = {
- .procname = "gc_stale_time",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NEIGH_VAR_QUEUE_LEN] = {
- .procname = "unres_qlen",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_unres_qlen,
- },
- [NEIGH_VAR_QUEUE_LEN_BYTES] = {
- .procname = "unres_qlen_bytes",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_PROXY_QLEN] = {
- .procname = "proxy_qlen",
- .maxlen = sizeof(int),
- .mode = 0644,
- .extra1 = &zero,
- .extra2 = &int_max,
- .proc_handler = proc_dointvec_minmax,
- },
- [NEIGH_VAR_ANYCAST_DELAY] = {
- .procname = "anycast_delay",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_userhz_jiffies,
- },
- [NEIGH_VAR_PROXY_DELAY] = {
- .procname = "proxy_delay",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_userhz_jiffies,
- },
- [NEIGH_VAR_LOCKTIME] = {
- .procname = "locktime",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_userhz_jiffies,
- },
- [NEIGH_VAR_RETRANS_TIME_MS] = {
- .procname = "retrans_time_ms",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- },
- [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
- .procname = "base_reachable_time_ms",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- },
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
+ NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
+ NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
+ NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
+ NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
+ NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
+ NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
+ NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
+ NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
+ NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
+ NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
+ NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
[NEIGH_VAR_GC_INTERVAL] = {
.procname = "gc_interval",
.maxlen = sizeof(int),
@@ -2951,31 +3034,23 @@ static struct neigh_sysctl_table {
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
- char *p_name, proc_handler *handler)
+ proc_handler *handler)
{
+ int i;
struct neigh_sysctl_table *t;
- const char *dev_name_source = NULL;
+ const char *dev_name_source;
char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
+ char *p_name;
t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
if (!t)
goto err;
- t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
- t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
- t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
- t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
- t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
- t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
- t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
- t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
- t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
- t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
- t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
- t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
- t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
- t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
- t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;
+ for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) {
+ t->neigh_vars[i].data += (long) p;
+ t->neigh_vars[i].extra1 = dev;
+ t->neigh_vars[i].extra2 = p;
+ }
if (dev) {
dev_name_source = dev->name;
@@ -2990,26 +3065,32 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
}
-
if (handler) {
/* RetransTime */
t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
- t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
/* ReachableTime */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
- t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
/* RetransTime (in milliseconds)*/
t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
- t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
/* ReachableTime (in milliseconds) */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
- t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
}
/* Don't export sysctls to unprivileged users */
if (neigh_parms_net(p)->user_ns != &init_user_ns)
t->neigh_vars[0].procname = NULL;
+ switch (neigh_parms_family(p)) {
+ case AF_INET:
+ p_name = "ipv4";
+ break;
+ case AF_INET6:
+ p_name = "ipv6";
+ break;
+ default:
+ BUG();
+ }
+
snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
p_name, dev_name_source);
t->sysctl_header =
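The template trick above deserves a note: each entry's .data field is stored as a pure offset into the neigh_parms data array (NEIGH_PARMS_DATA_OFFSET is the classic offsetof idiom), and neigh_sysctl_register() rebases every offset onto the live parms instance with t->neigh_vars[i].data += (long) p. A minimal sketch of the same trick, with a hypothetical struct parms standing in for struct neigh_parms:

	#include <stddef.h>

	struct parms { int data[4]; };	/* stand-in for struct neigh_parms */

	/* address of data[i] in a struct based at 0, i.e. a plain offset */
	#define PARMS_DATA_OFFSET(i) ((void *)&((struct parms *)0)->data[i])

	static void rebase(struct parms *p)
	{
		void *off  = PARMS_DATA_OFFSET(2);		/* template value */
		int  *real = (int *)((char *)off + (long)p);	/* rebased */

		*real = 42;	/* equivalent to p->data[2] = 42 */
	}

This is what lets a single static template serve every protocol family and device without the long list of per-field assignments it replaces.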
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f3edf9635e0..93886246a0b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -498,17 +498,7 @@ static struct attribute_group wireless_group = {
#define net_class_groups NULL
#endif /* CONFIG_SYSFS */
-#ifdef CONFIG_RPS
-/*
- * RX queue sysfs structures and functions.
- */
-struct rx_queue_attribute {
- struct attribute attr;
- ssize_t (*show)(struct netdev_rx_queue *queue,
- struct rx_queue_attribute *attr, char *buf);
- ssize_t (*store)(struct netdev_rx_queue *queue,
- struct rx_queue_attribute *attr, const char *buf, size_t len);
-};
+#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr, \
struct rx_queue_attribute, attr)
@@ -543,6 +533,7 @@ static const struct sysfs_ops rx_queue_sysfs_ops = {
.store = rx_queue_attr_store,
};
+#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
struct rx_queue_attribute *attribute, char *buf)
{
@@ -676,8 +667,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
while ((mask | (mask >> 1)) != mask)
mask |= (mask >> 1);
/* On 64 bit arches, must check mask fits in table->mask (u32),
- * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
- * doesnt overflow.
+ * and on 32bit arches, must check
+ * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
*/
#if BITS_PER_LONG > 32
if (mask > (unsigned long)(u32)mask)
@@ -718,16 +709,20 @@ static struct rx_queue_attribute rps_cpus_attribute =
static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
+#endif /* CONFIG_RPS */
static struct attribute *rx_queue_default_attrs[] = {
+#ifdef CONFIG_RPS
&rps_cpus_attribute.attr,
&rps_dev_flow_table_cnt_attribute.attr,
+#endif
NULL
};
static void rx_queue_release(struct kobject *kobj)
{
struct netdev_rx_queue *queue = to_rx_queue(kobj);
+#ifdef CONFIG_RPS
struct rps_map *map;
struct rps_dev_flow_table *flow_table;
@@ -743,15 +738,29 @@ static void rx_queue_release(struct kobject *kobj)
RCU_INIT_POINTER(queue->rps_flow_table, NULL);
call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
}
+#endif
memset(kobj, 0, sizeof(*kobj));
dev_put(queue->dev);
}
+static const void *rx_queue_namespace(struct kobject *kobj)
+{
+ struct netdev_rx_queue *queue = to_rx_queue(kobj);
+ struct device *dev = &queue->dev->dev;
+ const void *ns = NULL;
+
+ if (dev->class && dev->class->ns_type)
+ ns = dev->class->namespace(dev);
+
+ return ns;
+}
+
static struct kobj_type rx_queue_ktype = {
.sysfs_ops = &rx_queue_sysfs_ops,
.release = rx_queue_release,
.default_attrs = rx_queue_default_attrs,
+ .namespace = rx_queue_namespace
};
static int rx_queue_add_kobject(struct net_device *net, int index)
@@ -763,25 +772,36 @@ static int rx_queue_add_kobject(struct net_device *net, int index)
kobj->kset = net->queues_kset;
error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
"rx-%u", index);
- if (error) {
- kobject_put(kobj);
- return error;
+ if (error)
+ goto exit;
+
+ if (net->sysfs_rx_queue_group) {
+ error = sysfs_create_group(kobj, net->sysfs_rx_queue_group);
+ if (error)
+ goto exit;
}
kobject_uevent(kobj, KOBJ_ADD);
dev_hold(queue->dev);
return error;
+exit:
+ kobject_put(kobj);
+ return error;
}
-#endif /* CONFIG_RPS */
+#endif /* CONFIG_SYSFS */
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
int i;
int error = 0;
+#ifndef CONFIG_RPS
+ if (!net->sysfs_rx_queue_group)
+ return 0;
+#endif
for (i = old_num; i < new_num; i++) {
error = rx_queue_add_kobject(net, i);
if (error) {
@@ -790,8 +810,12 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
}
}
- while (--i >= new_num)
+ while (--i >= new_num) {
+ if (net->sysfs_rx_queue_group)
+ sysfs_remove_group(&net->_rx[i].kobj,
+ net->sysfs_rx_queue_group);
kobject_put(&net->_rx[i].kobj);
+ }
return error;
#else
@@ -1082,10 +1106,23 @@ static void netdev_queue_release(struct kobject *kobj)
dev_put(queue->dev);
}
+static const void *netdev_queue_namespace(struct kobject *kobj)
+{
+ struct netdev_queue *queue = to_netdev_queue(kobj);
+ struct device *dev = &queue->dev->dev;
+ const void *ns = NULL;
+
+ if (dev->class && dev->class->ns_type)
+ ns = dev->class->namespace(dev);
+
+ return ns;
+}
+
static struct kobj_type netdev_queue_ktype = {
.sysfs_ops = &netdev_queue_sysfs_ops,
.release = netdev_queue_release,
.default_attrs = netdev_queue_default_attrs,
+ .namespace = netdev_queue_namespace,
};
static int netdev_queue_add_kobject(struct net_device *net, int index)
@@ -1155,9 +1192,6 @@ static int register_queue_kobjects(struct net_device *net)
NULL, &net->dev.kobj);
if (!net->queues_kset)
return -ENOMEM;
-#endif
-
-#ifdef CONFIG_RPS
real_rx = net->real_num_rx_queues;
#endif
real_tx = net->real_num_tx_queues;
@@ -1184,7 +1218,7 @@ static void remove_queue_kobjects(struct net_device *net)
{
int real_rx = 0, real_tx = 0;
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
real_rx = net->real_num_rx_queues;
#endif
real_tx = net->real_num_tx_queues;
@@ -1358,7 +1392,7 @@ void netdev_class_remove_file_ns(struct class_attribute *class_attr,
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);
-int netdev_kobject_init(void)
+int __init netdev_kobject_init(void)
{
kobj_ns_type_register(&net_ns_type_operations);
return class_register(&net_class);
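rx_queue_namespace() and netdev_queue_namespace() above have identical bodies; a sketch of the helper they could share, assuming only that both queue types reach their struct device the same way (via queue->dev->dev):

	static const void *queue_dev_namespace(struct device *dev)
	{
		const void *ns = NULL;

		if (dev->class && dev->class->ns_type)
			ns = dev->class->namespace(dev);

		return ns;
	}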
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index bd7751ec1c4..2745a1b51e0 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -1,7 +1,7 @@
#ifndef __NET_SYSFS_H__
#define __NET_SYSFS_H__
-int netdev_kobject_init(void);
+int __init netdev_kobject_init(void);
int netdev_register_kobject(struct net_device *);
void netdev_unregister_kobject(struct net_device *);
int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
new file mode 100644
index 00000000000..719efd54166
--- /dev/null
+++ b/net/core/netclassid_cgroup.c
@@ -0,0 +1,120 @@
+/*
+ * net/core/netclassid_cgroup.c Classid Cgroupfs Handling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/cgroup.h>
+#include <linux/fdtable.h>
+#include <net/cls_cgroup.h>
+#include <net/sock.h>
+
+static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
+}
+
+struct cgroup_cls_state *task_cls_state(struct task_struct *p)
+{
+ return css_cls_state(task_css(p, net_cls_subsys_id));
+}
+EXPORT_SYMBOL_GPL(task_cls_state);
+
+static struct cgroup_subsys_state *
+cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct cgroup_cls_state *cs;
+
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return ERR_PTR(-ENOMEM);
+
+ return &cs->css;
+}
+
+static int cgrp_css_online(struct cgroup_subsys_state *css)
+{
+ struct cgroup_cls_state *cs = css_cls_state(css);
+ struct cgroup_cls_state *parent = css_cls_state(css_parent(css));
+
+ if (parent)
+ cs->classid = parent->classid;
+
+ return 0;
+}
+
+static void cgrp_css_free(struct cgroup_subsys_state *css)
+{
+ kfree(css_cls_state(css));
+}
+
+static int update_classid(const void *v, struct file *file, unsigned n)
+{
+ int err;
+ struct socket *sock = sock_from_file(file, &err);
+
+ if (sock)
+ sock->sk->sk_classid = (u32)(unsigned long)v;
+
+ return 0;
+}
+
+static void cgrp_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
+{
+ struct cgroup_cls_state *cs = css_cls_state(css);
+ void *v = (void *)(unsigned long)cs->classid;
+ struct task_struct *p;
+
+ cgroup_taskset_for_each(p, css, tset) {
+ task_lock(p);
+ iterate_fd(p->files, 0, update_classid, v);
+ task_unlock(p);
+ }
+}
+
+static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ return css_cls_state(css)->classid;
+}
+
+static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 value)
+{
+ css_cls_state(css)->classid = (u32) value;
+
+ return 0;
+}
+
+static struct cftype ss_files[] = {
+ {
+ .name = "classid",
+ .read_u64 = read_classid,
+ .write_u64 = write_classid,
+ },
+ { } /* terminate */
+};
+
+struct cgroup_subsys net_cls_subsys = {
+ .name = "net_cls",
+ .css_alloc = cgrp_css_alloc,
+ .css_online = cgrp_css_online,
+ .css_free = cgrp_css_free,
+ .attach = cgrp_attach,
+ .subsys_id = net_cls_subsys_id,
+ .base_cftypes = ss_files,
+ .module = THIS_MODULE,
+};
+
+static int __init init_netclassid_cgroup(void)
+{
+ return cgroup_load_subsys(&net_cls_subsys);
+}
+__initcall(init_netclassid_cgroup);
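A minimal consumer sketch (assumed, not part of this file): reading the classid of the current task through the task_cls_state() helper exported above. task_css() is RCU-protected, so the caller holds the RCU read lock:

	#include <linux/rcupdate.h>
	#include <net/cls_cgroup.h>

	static u32 current_classid(void)
	{
		struct cgroup_cls_state *cs;
		u32 classid = 0;

		rcu_read_lock();
		cs = task_cls_state(current);
		if (cs)
			classid = cs->classid;
		rcu_read_unlock();

		return classid;
	}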
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 19fe9c717ce..c03f3dec476 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -520,8 +520,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
skb->protocol = eth->h_proto = htons(ETH_P_IP);
}
- memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
- memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);
+ ether_addr_copy(eth->h_source, np->dev->dev_addr);
+ ether_addr_copy(eth->h_dest, np->remote_mac);
skb->dev = np->dev;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 56cbb69ba02..9043caedcd0 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -30,7 +30,7 @@
#define PRIOMAP_MIN_SZ 128
/*
- * Extend @dev->priomap so that it's large enough to accomodate
+ * Extend @dev->priomap so that it's large enough to accommodate
* @target_idx. @dev->priomap.priomap_len > @target_idx after successful
* return. Must be called under rtnl lock.
*/
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a797fff7f22..fdac61cac1b 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -389,6 +389,9 @@ struct pktgen_dev {
#ifdef CONFIG_XFRM
__u8 ipsmode; /* IPSEC mode (config) */
__u8 ipsproto; /* IPSEC type (config) */
+ __u32 spi;
+ struct dst_entry dst;
+ struct dst_ops dstops;
#endif
char result[512];
};
@@ -654,8 +657,11 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
}
#ifdef CONFIG_XFRM
- if (pkt_dev->flags & F_IPSEC_ON)
+ if (pkt_dev->flags & F_IPSEC_ON) {
seq_printf(seq, "IPSEC ");
+ if (pkt_dev->spi)
+ seq_printf(seq, "spi:%u", pkt_dev->spi);
+ }
#endif
if (pkt_dev->flags & F_MACSRC_RND)
@@ -1434,7 +1440,7 @@ static ssize_t pktgen_if_write(struct file *file,
if (!mac_pton(valstr, pkt_dev->dst_mac))
return -EINVAL;
/* Set up Dest MAC */
- memcpy(&pkt_dev->hh[0], pkt_dev->dst_mac, ETH_ALEN);
+ ether_addr_copy(&pkt_dev->hh[0], pkt_dev->dst_mac);
sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac);
return count;
@@ -1451,7 +1457,7 @@ static ssize_t pktgen_if_write(struct file *file,
if (!mac_pton(valstr, pkt_dev->src_mac))
return -EINVAL;
/* Set up Src MAC */
- memcpy(&pkt_dev->hh[6], pkt_dev->src_mac, ETH_ALEN);
+ ether_addr_copy(&pkt_dev->hh[6], pkt_dev->src_mac);
sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac);
return count;
@@ -1476,7 +1482,18 @@ static ssize_t pktgen_if_write(struct file *file,
sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows);
return count;
}
+#ifdef CONFIG_XFRM
+ if (!strcmp(name, "spi")) {
+ len = num_arg(&user_buffer[i], 10, &value);
+ if (len < 0)
+ return len;
+ i += len;
+ pkt_dev->spi = value;
+ sprintf(pg_result, "OK: spi=%u", pkt_dev->spi);
+ return count;
+ }
+#endif
if (!strcmp(name, "flowlen")) {
len = num_arg(&user_buffer[i], 10, &value);
if (len < 0)
@@ -2043,10 +2060,10 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
/* Default to the interface's mac if not explicitly set. */
if (is_zero_ether_addr(pkt_dev->src_mac))
- memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN);
+ ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);
/* Set up Dest MAC */
- memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
+ ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);
if (pkt_dev->flags & F_IPV6) {
int i, set = 0, err = 1;
@@ -2233,13 +2250,21 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
struct xfrm_state *x = pkt_dev->flows[flow].x;
struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);
if (!x) {
- /*slow path: we dont already have xfrm_state*/
- x = xfrm_stateonly_find(pn->net, DUMMY_MARK,
- (xfrm_address_t *)&pkt_dev->cur_daddr,
- (xfrm_address_t *)&pkt_dev->cur_saddr,
- AF_INET,
- pkt_dev->ipsmode,
- pkt_dev->ipsproto, 0);
+
+ if (pkt_dev->spi) {
+ /* We need to find the right SA as quickly as possible;
+ * search with minimal criteria to achieve this.
+ */
+ x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET);
+ } else {
+ /* slow path: we don't already have xfrm_state */
+ x = xfrm_stateonly_find(pn->net, DUMMY_MARK,
+ (xfrm_address_t *)&pkt_dev->cur_daddr,
+ (xfrm_address_t *)&pkt_dev->cur_saddr,
+ AF_INET,
+ pkt_dev->ipsmode,
+ pkt_dev->ipsproto, 0);
+ }
if (x) {
pkt_dev->flows[flow].x = x;
set_pkt_overhead(pkt_dev);
@@ -2475,31 +2500,47 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
#ifdef CONFIG_XFRM
+static u32 pktgen_dst_metrics[RTAX_MAX + 1] = {
+ [RTAX_HOPLIMIT] = 0x5, /* Set a static hoplimit */
+};
+
static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
{
struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
int err = 0;
+ struct net *net = dev_net(pkt_dev->odev);
if (!x)
return 0;
/* XXX: we dont support tunnel mode for now until
* we resolve the dst issue */
- if (x->props.mode != XFRM_MODE_TRANSPORT)
+ if ((x->props.mode != XFRM_MODE_TRANSPORT) && (pkt_dev->spi == 0))
return 0;
- spin_lock(&x->lock);
+ /* But when the user specifies a valid SPI, the transformation
+ * supports both transport/tunnel mode and ESP/AH types.
+ */
+ if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
+ skb->_skb_refdst = (unsigned long)&pkt_dev->dst | SKB_DST_NOREF;
+ rcu_read_lock_bh();
err = x->outer_mode->output(x, skb);
- if (err)
+ rcu_read_unlock_bh();
+ if (err) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
goto error;
+ }
err = x->type->output(x, skb);
- if (err)
+ if (err) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
goto error;
-
+ }
+ spin_lock_bh(&x->lock);
x->curlft.bytes += skb->len;
x->curlft.packets++;
+ spin_unlock_bh(&x->lock);
error:
- spin_unlock(&x->lock);
return err;
}
@@ -3542,6 +3583,17 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
#ifdef CONFIG_XFRM
pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
pkt_dev->ipsproto = IPPROTO_ESP;
+
+ /* xfrm tunnel mode needs an additional dst to extract the outer
+ * IP header protocol/ttl/id fields, so create a phony one here
+ * instead of looking up a valid rt, which would definitely hurt
+ * performance in this case.
+ */
+ pkt_dev->dstops.family = AF_INET;
+ pkt_dev->dst.dev = pkt_dev->odev;
+ dst_init_metrics(&pkt_dev->dst, pktgen_dst_metrics, false);
+ pkt_dev->dst.child = &pkt_dev->dst;
+ pkt_dev->dst.ops = &pkt_dev->dstops;
#endif
return add_dev_to_thread(t, pkt_dev);
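A userspace usage sketch for the new knob (hypothetical helper; the "spi" keyword and its decimal parse match pktgen_if_write() above, and /proc/net/pktgen/<ifname> is the standard per-device control file):

	#include <stdio.h>

	static int pktgen_set_spi(const char *ifname, unsigned int spi)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/net/pktgen/%s", ifname);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "spi %u\n", spi);	/* decimal, per num_arg(..., 10, ...) */
		return fclose(f);
	}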
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index cf67144d3e3..393b1bc9a61 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -365,6 +365,22 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops)
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
+static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
+{
+ struct net_device *master_dev;
+ const struct rtnl_link_ops *ops;
+
+ master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
+ if (!master_dev)
+ return 0;
+ ops = master_dev->rtnl_link_ops;
+ if (!ops->get_slave_size)
+ return 0;
+ /* IFLA_INFO_SLAVE_DATA + nested data */
+ return nla_total_size(sizeof(struct nlattr)) +
+ ops->get_slave_size(master_dev, dev);
+}
+
static size_t rtnl_link_get_size(const struct net_device *dev)
{
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
@@ -385,6 +401,8 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
/* IFLA_INFO_XSTATS */
size += nla_total_size(ops->get_xstats_size(dev));
+ size += rtnl_link_get_slave_info_data_size(dev);
+
return size;
}
@@ -403,34 +421,16 @@ static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
}
/**
- * __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
- * @ops: struct rtnl_af_ops * to register
- *
- * The caller must hold the rtnl_mutex.
- *
- * Returns 0 on success or a negative error code.
- */
-int __rtnl_af_register(struct rtnl_af_ops *ops)
-{
- list_add_tail(&ops->list, &rtnl_af_ops);
- return 0;
-}
-EXPORT_SYMBOL_GPL(__rtnl_af_register);
-
-/**
* rtnl_af_register - Register rtnl_af_ops with rtnetlink.
* @ops: struct rtnl_af_ops * to register
*
* Returns 0 on success or a negative error code.
*/
-int rtnl_af_register(struct rtnl_af_ops *ops)
+void rtnl_af_register(struct rtnl_af_ops *ops)
{
- int err;
-
rtnl_lock();
- err = __rtnl_af_register(ops);
+ list_add_tail(&ops->list, &rtnl_af_ops);
rtnl_unlock();
- return err;
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
@@ -477,40 +477,100 @@ static size_t rtnl_link_get_af_size(const struct net_device *dev)
return size;
}
-static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
+static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
- const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
- struct nlattr *linkinfo, *data;
- int err = -EMSGSIZE;
+ struct net_device *master_dev;
- linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
- if (linkinfo == NULL)
- goto out;
+ master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
+ if (master_dev && master_dev->rtnl_link_ops)
+ return true;
+ return false;
+}
+
+static int rtnl_link_slave_info_fill(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct net_device *master_dev;
+ const struct rtnl_link_ops *ops;
+ struct nlattr *slave_data;
+ int err;
+
+ master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
+ if (!master_dev)
+ return 0;
+ ops = master_dev->rtnl_link_ops;
+ if (!ops)
+ return 0;
+ if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
+ return -EMSGSIZE;
+ if (ops->fill_slave_info) {
+ slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
+ if (!slave_data)
+ return -EMSGSIZE;
+ err = ops->fill_slave_info(skb, master_dev, dev);
+ if (err < 0)
+ goto err_cancel_slave_data;
+ nla_nest_end(skb, slave_data);
+ }
+ return 0;
+
+err_cancel_slave_data:
+ nla_nest_cancel(skb, slave_data);
+ return err;
+}
+
+static int rtnl_link_info_fill(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
+ struct nlattr *data;
+ int err;
+ if (!ops)
+ return 0;
if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
- goto err_cancel_link;
+ return -EMSGSIZE;
if (ops->fill_xstats) {
err = ops->fill_xstats(skb, dev);
if (err < 0)
- goto err_cancel_link;
+ return err;
}
if (ops->fill_info) {
data = nla_nest_start(skb, IFLA_INFO_DATA);
- if (data == NULL) {
- err = -EMSGSIZE;
- goto err_cancel_link;
- }
+ if (data == NULL)
+ return -EMSGSIZE;
err = ops->fill_info(skb, dev);
if (err < 0)
goto err_cancel_data;
nla_nest_end(skb, data);
}
-
- nla_nest_end(skb, linkinfo);
return 0;
err_cancel_data:
nla_nest_cancel(skb, data);
+ return err;
+}
+
+static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct nlattr *linkinfo;
+ int err = -EMSGSIZE;
+
+ linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
+ if (linkinfo == NULL)
+ goto out;
+
+ err = rtnl_link_info_fill(skb, dev);
+ if (err < 0)
+ goto err_cancel_link;
+
+ err = rtnl_link_slave_info_fill(skb, dev);
+ if (err < 0)
+ goto err_cancel_link;
+
+ nla_nest_end(skb, linkinfo);
+ return 0;
+
err_cancel_link:
nla_nest_cancel(skb, linkinfo);
out:
@@ -1019,7 +1079,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
if (rtnl_port_fill(skb, dev))
goto nla_put_failure;
- if (dev->rtnl_link_ops) {
+ if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
if (rtnl_link_fill(skb, dev) < 0)
goto nla_put_failure;
}
@@ -1142,6 +1202,8 @@ EXPORT_SYMBOL(ifla_policy);
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
[IFLA_INFO_KIND] = { .type = NLA_STRING },
[IFLA_INFO_DATA] = { .type = NLA_NESTED },
+ [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
+ [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
};
static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
@@ -1729,7 +1791,9 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
const struct rtnl_link_ops *ops;
+ const struct rtnl_link_ops *m_ops = NULL;
struct net_device *dev;
+ struct net_device *master_dev = NULL;
struct ifinfomsg *ifm;
char kind[MODULE_NAME_LEN];
char ifname[IFNAMSIZ];
@@ -1759,6 +1823,12 @@ replay:
dev = NULL;
}
+ if (dev) {
+ master_dev = netdev_master_upper_dev_get(dev);
+ if (master_dev)
+ m_ops = master_dev->rtnl_link_ops;
+ }
+
err = validate_linkmsg(dev, tb);
if (err < 0)
return err;
@@ -1780,7 +1850,10 @@ replay:
}
if (1) {
- struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
+ struct nlattr *attr[ops ? ops->maxtype + 1 : 0];
+ struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0];
+ struct nlattr **data = NULL;
+ struct nlattr **slave_data = NULL;
struct net *dest_net;
if (ops) {
@@ -1799,6 +1872,24 @@ replay:
}
}
+ if (m_ops) {
+ if (m_ops->slave_maxtype &&
+ linkinfo[IFLA_INFO_SLAVE_DATA]) {
+ err = nla_parse_nested(slave_attr,
+ m_ops->slave_maxtype,
+ linkinfo[IFLA_INFO_SLAVE_DATA],
+ m_ops->slave_policy);
+ if (err < 0)
+ return err;
+ slave_data = slave_attr;
+ }
+ if (m_ops->slave_validate) {
+ err = m_ops->slave_validate(tb, slave_data);
+ if (err < 0)
+ return err;
+ }
+ }
+
if (dev) {
int modified = 0;
@@ -1818,6 +1909,17 @@ replay:
modified = 1;
}
+ if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
+ if (!m_ops || !m_ops->slave_changelink)
+ return -EOPNOTSUPP;
+
+ err = m_ops->slave_changelink(master_dev, dev,
+ tb, slave_data);
+ if (err < 0)
+ return err;
+ modified = 1;
+ }
+
return do_setlink(dev, ifm, tb, ifname, modified);
}
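The new slave_* hooks are driven entirely from the master device's rtnl_link_ops. A sketch of how a hypothetical master driver would wire them up (the my_* names and the MY_SLAVE_PRIO attribute are invented for illustration; the ops fields and callback signatures follow the call sites above):

	#include <net/rtnetlink.h>

	enum {
		MY_SLAVE_UNSPEC,
		MY_SLAVE_PRIO,		/* hypothetical per-slave attribute */
		__MY_SLAVE_MAX,
	};

	static const struct nla_policy my_slave_policy[__MY_SLAVE_MAX] = {
		[MY_SLAVE_PRIO]	= { .type = NLA_U32 },
	};

	static int my_slave_validate(struct nlattr *tb[], struct nlattr *data[]);
	static int my_slave_changelink(struct net_device *master,
				       struct net_device *slave,
				       struct nlattr *tb[], struct nlattr *data[]);
	static size_t my_get_slave_size(const struct net_device *master,
					const struct net_device *slave);
	static int my_fill_slave_info(struct sk_buff *skb,
				      const struct net_device *master,
				      const struct net_device *slave);

	static struct rtnl_link_ops my_master_link_ops = {
		.kind			= "mymaster",
		.slave_maxtype		= __MY_SLAVE_MAX - 1,
		.slave_policy		= my_slave_policy,
		.slave_validate		= my_slave_validate,
		.slave_changelink	= my_slave_changelink,
		.get_slave_size		= my_get_slave_size,
		.fill_slave_info	= my_fill_slave_info,
	};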
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0b5149c5bc4..8f519dbb358 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -65,6 +65,7 @@
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
+#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <asm/uaccess.h>
@@ -682,9 +683,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->inner_network_header = old->inner_network_header;
new->inner_mac_header = old->inner_mac_header;
skb_dst_copy(new, old);
- new->rxhash = old->rxhash;
+ skb_copy_hash(new, old);
new->ooo_okay = old->ooo_okay;
- new->l4_rxhash = old->l4_rxhash;
new->no_fcs = old->no_fcs;
new->encapsulation = old->encapsulation;
#ifdef CONFIG_XFRM
@@ -2092,6 +2092,91 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
+/**
+ * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
+ * @from: source buffer
+ *
+ * Calculates the amount of linear headroom needed in the 'to' skb passed
+ * into skb_zerocopy().
+ */
+unsigned int
+skb_zerocopy_headlen(const struct sk_buff *from)
+{
+ unsigned int hlen = 0;
+
+ if (!from->head_frag ||
+ skb_headlen(from) < L1_CACHE_BYTES ||
+ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+ hlen = skb_headlen(from);
+
+ if (skb_has_frag_list(from))
+ hlen = from->len;
+
+ return hlen;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
+
+/**
+ * skb_zerocopy - Zero copy skb to skb
+ * @to: destination buffer
+ * @from: source buffer
+ * @len: number of bytes to copy from source buffer
+ * @hlen: size of linear headroom in destination buffer
+ *
+ * Copies up to @len bytes from @from to @to by creating references
+ * to the frags in the source buffer.
+ *
+ * @hlen, as calculated by skb_zerocopy_headlen(), specifies the
+ * headroom in the @to buffer.
+ */
+void
+skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+{
+ int i, j = 0;
+ int plen = 0; /* length of skb->head fragment */
+ struct page *page;
+ unsigned int offset;
+
+ BUG_ON(!from->head_frag && !hlen);
+
+ /* don't bother with small payloads */
+ if (len <= skb_tailroom(to)) {
+ skb_copy_bits(from, 0, skb_put(to, len), len);
+ return;
+ }
+
+ if (hlen) {
+ skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+ len -= hlen;
+ } else {
+ plen = min_t(int, skb_headlen(from), len);
+ if (plen) {
+ page = virt_to_head_page(from->head);
+ offset = from->data - (unsigned char *)page_address(page);
+ __skb_fill_page_desc(to, 0, page, offset, plen);
+ get_page(page);
+ j = 1;
+ len -= plen;
+ }
+ }
+
+ to->truesize += len + plen;
+ to->len += len + plen;
+ to->data_len += len + plen;
+
+ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+ if (!len)
+ break;
+ skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
+ skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
+ len -= skb_shinfo(to)->frags[j].size;
+ skb_frag_ref(to, j);
+ j++;
+ }
+ skb_shinfo(to)->nr_frags = j;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy);
+
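A usage sketch for the pair (assumed caller shape; the nfnetlink queue code is the kind of consumer these exports target): size the destination with skb_zerocopy_headlen(), then let skb_zerocopy() take references on the source frags instead of copying them:

	static struct sk_buff *zerocopy_clone(const struct sk_buff *from,
					      int len, gfp_t gfp)
	{
		unsigned int hlen = skb_zerocopy_headlen(from);
		struct sk_buff *to = alloc_skb(hlen, gfp);

		if (!to)
			return NULL;

		skb_zerocopy(to, from, len, hlen);	/* head copied, frags referenced */
		return to;
	}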
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
__wsum csum;
@@ -2952,10 +3037,7 @@ perform_csum_check:
return segs;
err:
- while ((skb = segs)) {
- segs = skb->next;
- kfree_skb(skb);
- }
+ kfree_skb_list(segs);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
@@ -3438,6 +3520,278 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
+static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+ unsigned int max)
+{
+ if (skb_headlen(skb) >= len)
+ return 0;
+
+ /* If we need to pullup then pullup to the max, so we
+ * won't need to do it again.
+ */
+ if (max > skb->len)
+ max = skb->len;
+
+ if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+ return -ENOMEM;
+
+ if (skb_headlen(skb) < len)
+ return -EPROTO;
+
+ return 0;
+}
+
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
+static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
+{
+ unsigned int off;
+ bool fragment;
+ int err;
+
+ fragment = false;
+
+ err = skb_maybe_pull_tail(skb,
+ sizeof(struct iphdr),
+ MAX_IP_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+ fragment = true;
+
+ off = ip_hdrlen(skb);
+
+ err = -EPROTO;
+
+ if (fragment)
+ goto out;
+
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ err = skb_maybe_pull_tail(skb,
+ off + sizeof(struct tcphdr),
+ MAX_IP_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct tcphdr, check))) {
+ err = -EPROTO;
+ goto out;
+ }
+
+ if (recalculate)
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_TCP, 0);
+ break;
+ case IPPROTO_UDP:
+ err = skb_maybe_pull_tail(skb,
+ off + sizeof(struct udphdr),
+ MAX_IP_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct udphdr, check))) {
+ err = -EPROTO;
+ goto out;
+ }
+
+ if (recalculate)
+ udp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_UDP, 0);
+ break;
+ default:
+ goto out;
+ }
+
+ err = 0;
+
+out:
+ return err;
+}
+
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
+static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
+{
+ int err;
+ u8 nexthdr;
+ unsigned int off;
+ unsigned int len;
+ bool fragment;
+ bool done;
+
+ fragment = false;
+ done = false;
+
+ off = sizeof(struct ipv6hdr);
+
+ err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+
+ len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+ while (off <= len && !done) {
+ switch (nexthdr) {
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_ROUTING: {
+ struct ipv6_opt_hdr *hp;
+
+ err = skb_maybe_pull_tail(skb,
+ off +
+ sizeof(struct ipv6_opt_hdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+ nexthdr = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ break;
+ }
+ case IPPROTO_AH: {
+ struct ip_auth_hdr *hp;
+
+ err = skb_maybe_pull_tail(skb,
+ off +
+ sizeof(struct ip_auth_hdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ hp = OPT_HDR(struct ip_auth_hdr, skb, off);
+ nexthdr = hp->nexthdr;
+ off += ipv6_authlen(hp);
+ break;
+ }
+ case IPPROTO_FRAGMENT: {
+ struct frag_hdr *hp;
+
+ err = skb_maybe_pull_tail(skb,
+ off +
+ sizeof(struct frag_hdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ hp = OPT_HDR(struct frag_hdr, skb, off);
+
+ if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+ fragment = true;
+
+ nexthdr = hp->nexthdr;
+ off += sizeof(struct frag_hdr);
+ break;
+ }
+ default:
+ done = true;
+ break;
+ }
+ }
+
+ err = -EPROTO;
+
+ if (!done || fragment)
+ goto out;
+
+ switch (nexthdr) {
+ case IPPROTO_TCP:
+ err = skb_maybe_pull_tail(skb,
+ off + sizeof(struct tcphdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct tcphdr, check))) {
+ err = -EPROTO;
+ goto out;
+ }
+
+ if (recalculate)
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_TCP, 0);
+ break;
+ case IPPROTO_UDP:
+ err = skb_maybe_pull_tail(skb,
+ off + sizeof(struct udphdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct udphdr, check))) {
+ err = -EPROTO;
+ goto out;
+ }
+
+ if (recalculate)
+ udp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_UDP, 0);
+ break;
+ default:
+ goto out;
+ }
+
+ err = 0;
+
+out:
+ return err;
+}
+
+/**
+ * skb_checksum_setup - set up partial checksum offset
+ * @skb: the skb to set up
+ * @recalculate: if true the pseudo-header checksum will be recalculated
+ */
+int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
+{
+ int err;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ err = skb_checksum_setup_ip(skb, recalculate);
+ break;
+
+ case htons(ETH_P_IPV6):
+ err = skb_checksum_setup_ipv6(skb, recalculate);
+ break;
+
+ default:
+ err = -EPROTO;
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(skb_checksum_setup);
+
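A sketch of the intended call site (assumed; modeled on virtual-NIC receive paths, where guest-supplied CHECKSUM_PARTIAL metadata cannot be trusted as-is):

	static int my_rx_checksum_fixup(struct sk_buff *skb)
	{
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return 0;

		/* re-derive csum_start/csum_offset and recalculate the
		 * pseudo-header checksum from the packet headers
		 */
		return skb_checksum_setup(skb, true);
	}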
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
diff --git a/net/core/sock.c b/net/core/sock.c
index 5393b4b719d..0c127dcdf6a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -925,8 +925,8 @@ set_rcvbuf:
EXPORT_SYMBOL(sock_setsockopt);
-void cred_to_ucred(struct pid *pid, const struct cred *cred,
- struct ucred *ucred)
+static void cred_to_ucred(struct pid *pid, const struct cred *cred,
+ struct ucred *ucred)
{
ucred->pid = pid_vnr(pid);
ucred->uid = ucred->gid = -1;
@@ -937,7 +937,6 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
ucred->gid = from_kgid_munged(current_ns, cred->egid);
}
}
-EXPORT_SYMBOL_GPL(cred_to_ucred);
int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
@@ -1168,6 +1167,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
break;
+ case SO_BPF_EXTENSIONS:
+ v.val = bpf_tell_extensions();
+ break;
+
case SO_SELECT_ERR_QUEUE:
v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
break;
@@ -1308,19 +1311,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
module_put(owner);
}
-#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
-void sock_update_classid(struct sock *sk)
-{
- u32 classid;
-
- classid = task_cls_classid(current);
- if (classid != sk->sk_classid)
- sk->sk_classid = classid;
-}
-EXPORT_SYMBOL(sock_update_classid);
-#endif
-
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
void sock_update_netprioidx(struct sock *sk)
{
if (in_interrupt())
@@ -1666,22 +1657,6 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
EXPORT_SYMBOL(sock_wmalloc);
/*
- * Allocate a skb from the socket's receive buffer.
- */
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
- gfp_t priority)
-{
- if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
- struct sk_buff *skb = alloc_skb(size, priority);
- if (skb) {
- skb_set_owner_r(skb, sk);
- return skb;
- }
- }
- return NULL;
-}
-
-/*
* Allocate a memory block from the socket's option memory buffer.
*/
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
@@ -1865,9 +1840,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
put_page(pfrag->page);
}
- /* We restrict high order allocations to users that can afford to wait */
- order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
-
+ order = SKB_FRAG_PAGE_ORDER;
do {
gfp_t gfp = prio;
diff --git a/net/core/stream.c b/net/core/stream.c
index 512f0a24269..301c05f2606 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -122,7 +122,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
DEFINE_WAIT(wait);
if (sk_stream_memory_free(sk))
- current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
+ current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
while (1) {
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cca44419090..cf9cd13509a 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -122,7 +122,8 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
synchronize_rcu();
kfree(cur);
} else if (!cur && cpumask_test_cpu(i, mask)) {
- cur = kzalloc(len, GFP_KERNEL);
+ cur = kzalloc_node(len, GFP_KERNEL,
+ cpu_to_node(i));
if (!cur) {
/* not unwinding previous changes */
ret = -ENOMEM;
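The kzalloc_node() switch keeps each CPU's flow-limit table on that CPU's NUMA node. A sketch of the general pattern (hypothetical helper; a real caller sizes slots by nr_cpu_ids and unwinds on failure):

	static int alloc_node_local(void **slots, size_t len)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			slots[cpu] = kzalloc_node(len, GFP_KERNEL,
						  cpu_to_node(cpu));
			if (!slots[cpu])
				return -ENOMEM;
		}
		return 0;
	}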