author		Ingo Molnar <mingo@elte.hu>	2012-03-05 09:20:08 +0100
committer	Ingo Molnar <mingo@elte.hu>	2012-03-05 09:20:08 +0100
commit		737f24bda723fdf89ecaacb99fa2bf5683c32799 (patch)
tree		35495fff3e9956679cb5468e74e6814c8e44ee66 /net/core
parent		8eedce996556d7d06522cd3a0e6069141c8dffe0 (diff)
parent		b7c924274c456499264d1cfa3d44063bb11eb5db (diff)
Merge branch 'perf/urgent' into perf/core
Conflicts:
	tools/perf/builtin-record.c
	tools/perf/builtin-top.c
	tools/perf/perf.h
	tools/perf/util/top.h

Merge reason: resolve these cherry-picking conflicts.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c			10
-rw-r--r--	net/core/ethtool.c		2
-rw-r--r--	net/core/neighbour.c		2
-rw-r--r--	net/core/netpoll.c		2
-rw-r--r--	net/core/netprio_cgroup.c	15
-rw-r--r--	net/core/rtnetlink.c		78
-rw-r--r--	net/core/sock.c			7
7 files changed, 77 insertions, 39 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index da7ce7f0e56..6982bfd6a78 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff *p;
+ unsigned int maclen = skb->dev->hard_header_len;
for (p = napi->gro_list; p; p = p->next) {
unsigned long diffs;
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
diffs |= p->vlan_tci ^ skb->vlan_tci;
- diffs |= compare_ether_header(skb_mac_header(p),
- skb_gro_mac_header(skb));
+ if (maclen == ETH_HLEN)
+ diffs |= compare_ether_header(skb_mac_header(p),
+ skb_gro_mac_header(skb));
+ else if (!diffs)
+ diffs = memcmp(skb_mac_header(p),
+ skb_gro_mac_header(skb),
+ maclen);
NAPI_GRO_CB(p)->same_flow = !diffs;
NAPI_GRO_CB(p)->flush = 0;
}
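
For context, the hunk above keeps the fast Ethernet-header compare for the common hard_header_len == ETH_HLEN case and falls back to a full memcmp() over the device's header length otherwise, and that memcmp() only runs when the earlier device/VLAN checks found no difference. Below is a minimal userspace sketch of that decision; this is not kernel code, compare_ether_header() is approximated with memcmp(), and the helper names are illustrative only.

#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14

/* Rough stand-in for the kernel's compare_ether_header(): the real helper
 * XORs the 14 header bytes in word-sized chunks; a plain memcmp() gives
 * the same zero/nonzero answer for this sketch. */
static unsigned long compare_ether_header(const void *a, const void *b)
{
	return memcmp(a, b, ETH_HLEN) != 0;
}

/* Mirrors the patched flow-matching step in __napi_gro_receive():
 * 'diffs' already carries device and VLAN mismatches; the MAC header is
 * only memcmp()'d over the full header length when it is not a plain
 * Ethernet header and nothing has mismatched yet. */
static int same_flow(unsigned long diffs, const unsigned char *old_hdr,
		     const unsigned char *new_hdr, unsigned int maclen)
{
	if (maclen == ETH_HLEN)
		diffs |= compare_ether_header(old_hdr, new_hdr);
	else if (!diffs)
		diffs = memcmp(old_hdr, new_hdr, maclen);
	return !diffs;
}

int main(void)
{
	unsigned char a[ETH_HLEN] = { 0 }, b[ETH_HLEN] = { 0 };

	printf("same flow: %d\n", same_flow(0, a, b, ETH_HLEN));
	return 0;
}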
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 369b4189452..3f79db1b612 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
if (!dev->ethtool_ops->flash_device)
return -EOPNOTSUPP;
+ efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
return dev->ethtool_ops->flash_device(dev, &efl);
}
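
The ethtool change above only forces NUL termination of the user-supplied firmware file name before the driver's ->flash_device() hook sees it. A small userspace illustration of the same defensive step on struct ethtool_flash (the copy source here is invented for the example):

#include <linux/ethtool.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct ethtool_flash efl = { .cmd = ETHTOOL_FLASH };
	/* Stand-in for a possibly unterminated buffer copied from userspace. */
	const char user_name[ETHTOOL_FLASH_MAX_FILENAME] = "firmware.bin";

	memcpy(efl.data, user_name, ETHTOOL_FLASH_MAX_FILENAME);
	efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;	/* never trust the caller */

	printf("flash file: %s\n", efl.data);
	return 0;
}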
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e287346e093..2a83914b027 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -826,6 +826,8 @@ next_elt:
write_unlock_bh(&tbl->lock);
cond_resched();
write_lock_bh(&tbl->lock);
+ nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
}
/* Cycle through all hash buckets every base_reachable_time/2 ticks.
* ARP entry timeouts range from 1/2 base_reachable_time to 3/2
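
The neighbour.c hunk re-loads tbl->nht after the table lock has been dropped around cond_resched() and re-taken, because the hash table may have been resized and replaced in the meantime. A rough pthread analogy of that rule, with hypothetical names, assuming some other thread may swap the table pointer while the lock is released:

#include <pthread.h>
#include <sched.h>

struct hash_table { int nbuckets; /* ... buckets ... */ };

static struct hash_table table_a = { .nbuckets = 8 };
static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct hash_table *current_tbl = &table_a;	/* protected by tbl_lock */

static void gc_worker(void)
{
	pthread_mutex_lock(&tbl_lock);
	struct hash_table *tbl = current_tbl;

	/* ... scan some buckets of tbl ... */

	/* Give others a chance to run (the kernel code does cond_resched()). */
	pthread_mutex_unlock(&tbl_lock);
	sched_yield();
	pthread_mutex_lock(&tbl_lock);

	/* The table may have been replaced while the lock was dropped:
	 * re-read the pointer instead of keeping the stale 'tbl'. */
	tbl = current_tbl;

	/* ... continue scanning with the (possibly new) table ... */
	(void)tbl->nbuckets;
	pthread_mutex_unlock(&tbl_lock);
}

int main(void)
{
	gc_worker();
	return 0;
}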
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b0829866..ddefc513b44 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
poll_napi(dev);
- if (dev->priv_flags & IFF_SLAVE) {
+ if (dev->flags & IFF_SLAVE) {
if (dev->npinfo) {
struct net_device *bond_dev = dev->master;
struct sk_buff *skb;
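
IFF_SLAVE is a bit in dev->flags, the same flag word SIOCGIFFLAGS reports, rather than in dev->priv_flags, which is what the netpoll one-liner corrects. For reference, the flag can be inspected from userspace roughly like this ("eth0" is just an example interface name):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>		/* struct ifreq, IFNAMSIZ, IFF_SLAVE */

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface */

	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
		printf("%s %s a bonding slave\n", ifr.ifr_name,
		       (ifr.ifr_flags & IFF_SLAVE) ? "is" : "is not");

	close(fd);
	return 0;
}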
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 3a9fd4826b7..4dacc44637e 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
spin_lock_irqsave(&prioidx_map_lock, flags);
prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+ if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
+ spin_unlock_irqrestore(&prioidx_map_lock, flags);
+ return -ENOSPC;
+ }
set_bit(prioidx, prioidx_map);
spin_unlock_irqrestore(&prioidx_map_lock, flags);
- if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
- return -ENOSPC;
-
atomic_set(&max_prioidx, prioidx);
*prio = prioidx;
return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
static void update_netdev_tables(void)
{
struct net_device *dev;
- u32 max_len = atomic_read(&max_prioidx);
+ u32 max_len = atomic_read(&max_prioidx) + 1;
struct netprio_map *map;
rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
{
struct net_device *dev = ptr;
struct netprio_map *old;
- u32 max_len = atomic_read(&max_prioidx);
/*
* Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@ static int netprio_device_event(struct notifier_block *unused,
*/
switch (event) {
-
- case NETDEV_REGISTER:
- if (max_len)
- extend_netdev_table(dev, max_len);
- break;
case NETDEV_UNREGISTER:
old = rtnl_dereference(dev->priomap);
RCU_INIT_POINTER(dev->priomap, NULL);
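
In the netprio_cgroup changes, the "map full" test now happens before set_bit() and while prioidx_map_lock is still held, so a failed lookup can no longer mark a bit past the end of the map, and table sizing uses max_prioidx + 1 entries because the index is zero-based. A compact userspace sketch of the corrected allocation order, using invented names and a mutex in place of the spinlock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MAP_BITS 64	/* stand-in for sizeof(unsigned long) * PRIOIDX_SZ */

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long idx_map;	/* bit i set => index i in use */

/* Allocate the lowest free index, or -ENOSPC if the map is full.
 * The full-map test runs *before* the bit is set and *before* the lock
 * is dropped, mirroring the patched get_prioidx(). */
static int get_idx(unsigned int *idx)
{
	unsigned int i;

	pthread_mutex_lock(&map_lock);
	for (i = 0; i < MAP_BITS; i++)
		if (!(idx_map & (1ULL << i)))
			break;
	if (i == MAP_BITS) {
		pthread_mutex_unlock(&map_lock);
		return -ENOSPC;
	}
	idx_map |= 1ULL << i;
	pthread_mutex_unlock(&map_lock);

	*idx = i;
	return 0;
}

int main(void)
{
	unsigned int idx;

	while (get_idx(&idx) == 0)
		printf("allocated index %u\n", idx);
	printf("map exhausted\n");
	return 0;
}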
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 65aebd45002..606a6e8f367 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -60,7 +60,6 @@ struct rtnl_link {
};
static DEFINE_MUTEX(rtnl_mutex);
-static u16 min_ifinfo_dump_size;
void rtnl_lock(void)
{
@@ -724,10 +723,11 @@ static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
}
/* All VF info */
-static inline int rtnl_vfinfo_size(const struct net_device *dev)
+static inline int rtnl_vfinfo_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
- if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
-
+ if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
+ (ext_filter_mask & RTEXT_FILTER_VF)) {
int num_vfs = dev_num_vf(dev->dev.parent);
size_t size = nla_total_size(sizeof(struct nlattr));
size += nla_total_size(num_vfs * sizeof(struct nlattr));
@@ -766,7 +766,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
return port_self_size;
}
-static noinline size_t if_nlmsg_size(const struct net_device *dev)
+static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -784,8 +785,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
- + nla_total_size(4) /* IFLA_NUM_VF */
- + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+ + nla_total_size(ext_filter_mask
+ & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
@@ -868,7 +870,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
- unsigned int flags)
+ unsigned int flags, u32 ext_filter_mask)
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
@@ -941,10 +943,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
goto nla_put_failure;
copy_rtnl_link_stats64(nla_data(attr), stats);
- if (dev->dev.parent)
+ if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
- if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
+ if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
+ && (ext_filter_mask & RTEXT_FILTER_VF)) {
int i;
struct nlattr *vfinfo, *vf;
@@ -1048,6 +1051,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
struct net_device *dev;
struct hlist_head *head;
struct hlist_node *node;
+ struct nlattr *tb[IFLA_MAX+1];
+ u32 ext_filter_mask = 0;
s_h = cb->args[0];
s_idx = cb->args[1];
@@ -1055,6 +1060,12 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
cb->seq = net->dev_base_seq;
+ nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+ ifla_policy);
+
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
@@ -1064,7 +1075,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, 0,
- NLM_F_MULTI) <= 0)
+ NLM_F_MULTI,
+ ext_filter_mask) <= 0)
goto out;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1100,6 +1112,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_VF_PORTS] = { .type = NLA_NESTED },
[IFLA_PORT_SELF] = { .type = NLA_NESTED },
[IFLA_AF_SPEC] = { .type = NLA_NESTED },
+ [IFLA_EXT_MASK] = { .type = NLA_U32 },
};
EXPORT_SYMBOL(ifla_policy);
@@ -1509,8 +1522,6 @@ errout:
if (send_addr_notify)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
- min_ifinfo_dump_size = max_t(u16, if_nlmsg_size(dev),
- min_ifinfo_dump_size);
return err;
}
@@ -1842,6 +1853,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
struct net_device *dev = NULL;
struct sk_buff *nskb;
int err;
+ u32 ext_filter_mask = 0;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
@@ -1850,6 +1862,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
@@ -1861,12 +1876,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
if (dev == NULL)
return -ENODEV;
- nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+ nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
if (nskb == NULL)
return -ENOBUFS;
err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
- nlh->nlmsg_seq, 0, 0);
+ nlh->nlmsg_seq, 0, 0, ext_filter_mask);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
@@ -1877,8 +1892,31 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
return err;
}
-static u16 rtnl_calcit(struct sk_buff *skb)
+static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ struct nlattr *tb[IFLA_MAX+1];
+ u32 ext_filter_mask = 0;
+ u16 min_ifinfo_dump_size = 0;
+
+ nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy);
+
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
+ if (!ext_filter_mask)
+ return NLMSG_GOODSIZE;
+ /*
+ * traverse the list of net devices and compute the minimum
+ * buffer size based upon the filter mask.
+ */
+ list_for_each_entry(dev, &net->dev_base_head, dev_list) {
+ min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
+ if_nlmsg_size(dev,
+ ext_filter_mask));
+ }
+
return min_ifinfo_dump_size;
}
@@ -1913,13 +1951,11 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
int err = -ENOBUFS;
size_t if_info_size;
- skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
+ skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
if (skb == NULL)
goto errout;
- min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
-
- err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
+ err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -1977,7 +2013,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EOPNOTSUPP;
calcit = rtnl_get_calcit(family, type);
if (calcit)
- min_dump_alloc = calcit(skb);
+ min_dump_alloc = calcit(skb, nlh);
__rtnl_unlock();
rtnl = net->rtnl;
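
The rtnetlink changes make the per-VF attributes opt-in: IFLA_NUM_VF and IFLA_VFINFO_LIST are emitted, and rtnl_calcit() sizes the dump buffer for them, only when the request carries an IFLA_EXT_MASK attribute with RTEXT_FILTER_VF set. Below is a hedged userspace sketch of such a dump request, assuming uapi headers that already export IFLA_EXT_MASK and RTEXT_FILTER_VF; reply parsing and most error handling are omitted.

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

/* Ask for a full link dump with VF details.  The layout matches what the
 * patched rtnl_dump_ifinfo()/rtnl_calcit() parse: nlmsghdr, struct
 * rtgenmsg, then netlink attributes. */
int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg gen;
		char attrbuf[64];
	} req;
	struct rtattr *ext_req;
	__u32 filt_mask = RTEXT_FILTER_VF;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtgenmsg));
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.gen.rtgen_family = AF_UNSPEC;

	/* Append IFLA_EXT_MASK after the rtgenmsg header. */
	ext_req = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	ext_req->rta_type = IFLA_EXT_MASK;
	ext_req->rta_len = RTA_LENGTH(sizeof(filt_mask));
	memcpy(RTA_DATA(ext_req), &filt_mask, sizeof(filt_mask));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
			    RTA_LENGTH(sizeof(filt_mask));

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
		perror("send");

	/* ... recv() the RTM_NEWLINK multipart reply and walk IFLA_* here ... */
	close(fd);
	return 0;
}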
diff --git a/net/core/sock.c b/net/core/sock.c
index 3a4e5817a2a..95aff9c7419 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
void sock_update_netprioidx(struct sock *sk)
{
- struct cgroup_netprio_state *state;
if (in_interrupt())
return;
- rcu_read_lock();
- state = task_netprio_state(current);
- sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
- rcu_read_unlock();
+
+ sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif