Diffstat (limited to 'drivers/net/team/team.c')
 drivers/net/team/team.c | 276 +++++++++++++++++++++++++++++++++---------
 1 file changed, 221 insertions(+), 55 deletions(-)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bff7e0b0b4e..b4958c7ffa8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -622,6 +622,86 @@ static int team_change_mode(struct team *team, const char *kind)
}
+/*********************
+ * Peers notification
+ *********************/
+
+static void team_notify_peers_work(struct work_struct *work)
+{
+ struct team *team;
+
+ team = container_of(work, struct team, notify_peers.dw.work);
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&team->notify_peers.dw, 0);
+ return;
+ }
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+ rtnl_unlock();
+ if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+ schedule_delayed_work(&team->notify_peers.dw,
+ msecs_to_jiffies(team->notify_peers.interval));
+}
+
+static void team_notify_peers(struct team *team)
+{
+ if (!team->notify_peers.count || !netif_running(team->dev))
+ return;
+ atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+ schedule_delayed_work(&team->notify_peers.dw, 0);
+}
+
+static void team_notify_peers_init(struct team *team)
+{
+ INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
+}
+
+static void team_notify_peers_fini(struct team *team)
+{
+ cancel_delayed_work_sync(&team->notify_peers.dw);
+}
+
+
+/*******************************
+ * Send multicast group rejoins
+ *******************************/
+
+static void team_mcast_rejoin_work(struct work_struct *work)
+{
+ struct team *team;
+
+ team = container_of(work, struct team, mcast_rejoin.dw.work);
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+ return;
+ }
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+ rtnl_unlock();
+ if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+ schedule_delayed_work(&team->mcast_rejoin.dw,
+ msecs_to_jiffies(team->mcast_rejoin.interval));
+}
+
+static void team_mcast_rejoin(struct team *team)
+{
+ if (!team->mcast_rejoin.count || !netif_running(team->dev))
+ return;
+ atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
+ schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+}
+
+static void team_mcast_rejoin_init(struct team *team)
+{
+ INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
+}
+
+static void team_mcast_rejoin_fini(struct team *team)
+{
+ cancel_delayed_work_sync(&team->mcast_rejoin.dw);
+}
+
+
/************************
* Rx path frame handler
************************/
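Aside, not from the patch itself: both new helper families share one shape. team_notify_peers() (and its mcast_rejoin twin) arms count_pending and kicks a delayed work item; each worker pass fires one notifier call under RTNL and re-queues itself every interval milliseconds until count_pending drains to zero. rtnl_trylock() with a zero-delay re-queue is used instead of rtnl_lock() so the workqueue thread never blocks on RTNL; that matters because the _fini helpers call cancel_delayed_work_sync(), which would deadlock if it ran under RTNL (as in team_uninit()) while the worker sat waiting for the same lock.

A hedged sketch of what these events reach on the receiving side; the function below is illustrative only:

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		switch (event) {
		case NETDEV_NOTIFY_PEERS:
			/* e.g. emit gratuitous ARP / unsolicited NA for dev */
			break;
		case NETDEV_RESEND_IGMP:
			/* e.g. schedule IGMP/MLD report retransmission */
			break;
		}
		return NOTIFY_DONE;
	}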
@@ -846,6 +926,8 @@ static void team_port_enable(struct team *team,
team_queue_override_port_add(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
+ team_notify_peers(team);
+ team_mcast_rejoin(team);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -875,6 +957,8 @@ static void team_port_disable(struct team *team,
team->en_port_count--;
team_queue_override_port_del(team, port);
team_adjust_ops(team);
+ team_notify_peers(team);
+ team_mcast_rejoin(team);
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -884,7 +968,7 @@ static void team_port_disable(struct team *team,
static void __team_compute_features(struct team *team)
{
struct team_port *port;
- u32 vlan_features = TEAM_VLAN_FEATURES;
+ u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
@@ -947,17 +1031,19 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
- gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
struct netpoll *np;
int err;
- np = kzalloc(sizeof(*np), gfp);
+ if (!team->dev->npinfo)
+ return 0;
+
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
- err = __netpoll_setup(np, port->dev, gfp);
+ err = __netpoll_setup(np, port->dev);
if (err) {
kfree(np);
return err;
@@ -979,25 +1065,14 @@ static void team_port_disable_netpoll(struct team_port *port)
__netpoll_cleanup(np);
kfree(np);
}
-
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
- return team->dev->npinfo;
-}
-
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
- gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
- return NULL;
-}
#endif
static void __team_port_change_port_added(struct team_port *port, bool linkup);
@@ -1079,13 +1154,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- if (team_netpoll_info(team)) {
- err = team_port_enable_netpoll(team, port, GFP_KERNEL);
- if (err) {
- netdev_err(dev, "Failed to enable netpoll on device %s\n",
- portname);
- goto err_enable_netpoll;
- }
+ err = team_port_enable_netpoll(team, port);
+ if (err) {
+ netdev_err(dev, "Failed to enable netpoll on device %s\n",
+ portname);
+ goto err_enable_netpoll;
}
err = netdev_master_upper_dev_link(port_dev, dev);
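Background on the netpoll hunks above: team_port_enable_netpoll() now checks team->dev->npinfo itself and returns 0 when netpoll is not active on the master, so the team_netpoll_info() wrapper and the gfp_t plumbing can go away; port addition runs in process context under RTNL, which is why a plain GFP_KERNEL allocation suffices. The call-site simplification, roughly:

	/* before: every caller guarded the helper
	 *	if (team_netpoll_info(team))
	 *		err = team_port_enable_netpoll(team, port, GFP_KERNEL);
	 * after: unconditionally callable
	 *	err = team_port_enable_netpoll(team, port);
	 */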
@@ -1205,6 +1278,62 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
return team_change_mode(team, ctx->data.str_val);
}
+static int team_notify_peers_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->notify_peers.count;
+ return 0;
+}
+
+static int team_notify_peers_count_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->notify_peers.count = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_notify_peers_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->notify_peers.interval;
+ return 0;
+}
+
+static int team_notify_peers_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->notify_peers.interval = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_mcast_rejoin_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->mcast_rejoin.count;
+ return 0;
+}
+
+static int team_mcast_rejoin_count_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->mcast_rejoin.count = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_mcast_rejoin_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->mcast_rejoin.interval;
+ return 0;
+}
+
+static int team_mcast_rejoin_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->mcast_rejoin.interval = ctx->data.u32_val;
+ return 0;
+}
+
static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
@@ -1235,6 +1364,8 @@ static int team_user_linkup_option_get(struct team *team,
return 0;
}
+static void __team_carrier_check(struct team *team);
+
static int team_user_linkup_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
@@ -1242,6 +1373,7 @@ static int team_user_linkup_option_set(struct team *team,
port->user.linkup = ctx->data.bool_val;
team_refresh_port_linkup(port);
+ __team_carrier_check(port->team);
return 0;
}
@@ -1261,6 +1393,7 @@ static int team_user_linkup_en_option_set(struct team *team,
port->user.linkup_enabled = ctx->data.bool_val;
team_refresh_port_linkup(port);
+ __team_carrier_check(port->team);
return 0;
}
@@ -1317,6 +1450,30 @@ static const struct team_option team_options[] = {
.setter = team_mode_option_set,
},
{
+ .name = "notify_peers_count",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_notify_peers_count_get,
+ .setter = team_notify_peers_count_set,
+ },
+ {
+ .name = "notify_peers_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_notify_peers_interval_get,
+ .setter = team_notify_peers_interval_set,
+ },
+ {
+ .name = "mcast_rejoin_count",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_mcast_rejoin_count_get,
+ .setter = team_mcast_rejoin_count_set,
+ },
+ {
+ .name = "mcast_rejoin_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_mcast_rejoin_interval_get,
+ .setter = team_mcast_rejoin_interval_set,
+ },
+ {
.name = "enabled",
.type = TEAM_OPTION_TYPE_BOOL,
.per_port = true,
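The four new entries follow the file's standard option pattern: a TEAM_OPTION_TYPE_U32 option maps to a getter/setter pair that copies between team state and ctx->data.u32_val. A sketch of how the option core consumes one of them ('opt' is hypothetical shorthand for the "notify_peers_count" entry; the option-instance machinery is elided):

	struct team_gsetter_ctx ctx;
	int err;

	err = opt->getter(team, &ctx);	/* -> team_notify_peers_count_get() */
	if (!err)
		pr_info("notify_peers_count = %u\n", ctx.data.u32_val);

Userspace reaches the same knobs through libteam; teamd exposes them in its config as notify_peers.count, notify_peers.interval, mcast_rejoin.count and mcast_rejoin.interval.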
@@ -1381,7 +1538,7 @@ static int team_init(struct net_device *dev)
mutex_init(&team->lock);
team_set_no_mode(team);
- team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
if (!team->pcpu_stats)
return -ENOMEM;
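netdev_alloc_pcpu_stats() replaces the bare alloc_percpu(): besides allocating, it initializes each per-CPU u64_stats_sync seqcount, which the old code left uninitialized (harmless on 64-bit, but required for 32-bit and lockdep correctness). Roughly equivalent to:

	/* sketch of what the helper does internally */
	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	for_each_possible_cpu(i)
		u64_stats_init(&per_cpu_ptr(team->pcpu_stats, i)->syncp);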
@@ -1396,6 +1553,10 @@ static int team_init(struct net_device *dev)
INIT_LIST_HEAD(&team->option_list);
INIT_LIST_HEAD(&team->option_inst_list);
+
+ team_notify_peers_init(team);
+ team_mcast_rejoin_init(team);
+
err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
if (err)
goto err_options_register;
@@ -1406,6 +1567,8 @@ static int team_init(struct net_device *dev)
return 0;
err_options_register:
+ team_mcast_rejoin_fini(team);
+ team_notify_peers_fini(team);
team_queue_override_fini(team);
err_team_queue_override_init:
free_percpu(team->pcpu_stats);
@@ -1425,6 +1588,8 @@ static void team_uninit(struct net_device *dev)
__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ team_mcast_rejoin_fini(team);
+ team_notify_peers_fini(team);
team_queue_override_fini(team);
mutex_unlock(&team->lock);
}
@@ -1474,7 +1639,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
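ndo_select_queue grew two parameters in this interval: accel_priv (used by L2-forwarding offload) and fallback, the core's default queue picker. team ignores both and keeps returning the queue_mapping recorded on receive, but a minimal conforming implementation, purely as a sketch, would look like:

	static u16 example_select_queue(struct net_device *dev,
					struct sk_buff *skb, void *accel_priv,
					select_queue_fallback_t fallback)
	{
		/* defer to the core's default queue selection */
		return fallback(dev, skb);
	}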
@@ -1558,6 +1724,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
* to traverse list in reverse under rcu_read_lock
*/
mutex_lock(&team->lock);
+ team->port_mtu_change_allowed = true;
list_for_each_entry(port, &team->port_list, list) {
err = dev_set_mtu(port->dev, new_mtu);
if (err) {
@@ -1566,6 +1733,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
goto unwind;
}
}
+ team->port_mtu_change_allowed = false;
mutex_unlock(&team->lock);
dev->mtu = new_mtu;
@@ -1575,6 +1743,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
unwind:
list_for_each_entry_continue_reverse(port, &team->port_list, list)
dev_set_mtu(port->dev, dev->mtu);
+ team->port_mtu_change_allowed = false;
mutex_unlock(&team->lock);
return err;
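The port_mtu_change_allowed flag pairs with the NETDEV_PRECHANGEMTU handling near the end of this diff: the master sets the flag around its own dev_set_mtu() calls on the ports, so the notifier can veto MTU changes that do not originate from the team device while still letting team_change_mtu() propagate the new value. Condensed:

	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	err = dev_set_mtu(port->dev, new_mtu);	/* notifier sees the flag set */
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

Note that both the success path and the unwind path clear the flag before releasing team->lock.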
@@ -1593,13 +1762,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
for_each_possible_cpu(i) {
p = per_cpu_ptr(team->pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
rx_multicast = p->rx_multicast;
tx_packets = p->tx_packets;
tx_bytes = p->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
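The u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pair was replaced treewide by the _irq variants; the reader loop itself is unchanged. For contrast, the writer side of the same per-CPU counters looks roughly like this (a sketch assuming the usual struct team_pcpu_stats layout with a u64_stats_sync member named syncp):

	struct team_pcpu_stats *pcpu_stats = this_cpu_ptr(team->pcpu_stats);

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);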
@@ -1682,7 +1851,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
}
static int team_netpoll_setup(struct net_device *dev,
- struct netpoll_info *npifo, gfp_t gfp)
+ struct netpoll_info *npifo)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
@@ -1690,7 +1859,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port, gfp);
+ err = team_port_enable_netpoll(team, port);
if (err) {
__team_netpoll_cleanup(team);
break;
@@ -1811,7 +1980,7 @@ static void team_setup_by_port(struct net_device *dev,
dev->addr_len = port_dev->addr_len;
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
- memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+ eth_hw_addr_inherit(dev, port_dev);
}
static int team_dev_type_check_change(struct net_device *dev,
@@ -1860,6 +2029,10 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO;
+
+ /* Don't allow team devices to change network namespaces. */
+ dev->features |= NETIF_F_NETNS_LOCAL;
+
dev->hw_features = TEAM_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -2481,7 +2654,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
return err;
}
-static struct genl_ops team_nl_ops[] = {
+static const struct genl_ops team_nl_ops[] = {
{
.cmd = TEAM_CMD_NOOP,
.doit = team_nl_cmd_noop,
@@ -2507,15 +2680,15 @@ static struct genl_ops team_nl_ops[] = {
},
};
-static struct genl_multicast_group team_change_event_mcgrp = {
- .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+static const struct genl_multicast_group team_nl_mcgrps[] = {
+ { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};
static int team_nl_send_multicast(struct sk_buff *skb,
struct team *team, u32 portid)
{
- return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
- team_change_event_mcgrp.id, GFP_KERNEL);
+ return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
+ skb, 0, 0, GFP_KERNEL);
}
static int team_nl_send_event_options_get(struct team *team,
@@ -2534,23 +2707,8 @@ static int team_nl_send_event_port_get(struct team *team,
static int team_nl_init(void)
{
- int err;
-
- err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
- ARRAY_SIZE(team_nl_ops));
- if (err)
- return err;
-
- err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
- if (err)
- goto err_change_event_grp_reg;
-
- return 0;
-
-err_change_event_grp_reg:
- genl_unregister_family(&team_nl_family);
-
- return err;
+ return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
+ team_nl_mcgrps);
}
static void team_nl_fini(void)
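The netlink changes track the genetlink API consolidation: genl_register_family_with_ops_groups() registers the now-const ops array and the multicast groups in one call, sizing both via ARRAY_SIZE internally. Group ids are no longer stored in the group struct but resolved from the group's index within the family, which is why team_nl_send_multicast() passes 0 (the index of TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME in team_nl_mcgrps[]) instead of team_change_event_mcgrp.id, and why the error-unwind ladder in team_nl_init() collapses to a single return.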
@@ -2679,8 +2837,10 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_UP:
if (netif_carrier_ok(dev))
team_port_change_check(port, true);
+ break;
case NETDEV_DOWN:
team_port_change_check(port, false);
+ break;
case NETDEV_CHANGE:
if (netif_running(port->dev))
team_port_change_check(port,
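The two added break statements fix an unintended fallthrough: previously a NETDEV_UP event for a port with carrier ran team_port_change_check(port, true) and then fell straight into the NETDEV_DOWN arm, immediately reporting the port down again, and NETDEV_DOWN in turn fell through into the NETDEV_CHANGE handling.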
@@ -2692,12 +2852,18 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
team_compute_features(port->team);
break;
- case NETDEV_CHANGEMTU:
+ case NETDEV_PRECHANGEMTU:
/* Forbid to change mtu of underlying device */
- return NOTIFY_BAD;
+ if (!port->team->port_mtu_change_allowed)
+ return NOTIFY_BAD;
+ break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid to change type of underlying device */
return NOTIFY_BAD;
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+ call_netdevice_notifiers(event, port->team->dev);
+ break;
}
return NOTIFY_DONE;
}
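Finally, a NETDEV_RESEND_IGMP event arriving on a port is re-emitted against the team master, so the IGMP/MLD rejoin happens on the device that actually holds the multicast memberships, i.e. the aggregate interface rather than the individual port.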