Diffstat (limited to 'drivers/net/team/team.c')
-rw-r--r--	drivers/net/team/team.c	418
1 file changed, 310 insertions(+), 108 deletions(-)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 05c5efe8459..b4958c7ffa8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -73,11 +73,24 @@ static int team_port_set_orig_dev_addr(struct team_port *port)
return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}
-int team_port_set_team_dev_addr(struct team_port *port)
+static int team_port_set_team_dev_addr(struct team *team,
+ struct team_port *port)
+{
+ return __set_port_dev_addr(port->dev, team->dev->dev_addr);
+}
+
+int team_modeop_port_enter(struct team *team, struct team_port *port)
{
- return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
+ return team_port_set_team_dev_addr(team, port);
}
-EXPORT_SYMBOL(team_port_set_team_dev_addr);
+EXPORT_SYMBOL(team_modeop_port_enter);
+
+void team_modeop_port_change_dev_addr(struct team *team,
+ struct team_port *port)
+{
+ team_port_set_team_dev_addr(team, port);
+}
+EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
static void team_refresh_port_linkup(struct team_port *port)
{
@@ -490,9 +503,9 @@ static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
return false;
}
-rx_handler_result_t team_dummy_receive(struct team *team,
- struct team_port *port,
- struct sk_buff *skb)
+static rx_handler_result_t team_dummy_receive(struct team *team,
+ struct team_port *port,
+ struct sk_buff *skb)
{
return RX_HANDLER_ANOTHER;
}
@@ -512,31 +525,26 @@ static void team_set_no_mode(struct team *team)
team->mode = &__team_no_mode;
}
-static void __team_adjust_ops(struct team *team, int en_port_count)
+static void team_adjust_ops(struct team *team)
{
/*
* To avoid checks in rx/tx skb paths, ensure here that non-null and
* correct ops are always set.
*/
- if (!en_port_count || !team_is_mode_set(team) ||
+ if (!team->en_port_count || !team_is_mode_set(team) ||
!team->mode->ops->transmit)
team->ops.transmit = team_dummy_transmit;
else
team->ops.transmit = team->mode->ops->transmit;
- if (!en_port_count || !team_is_mode_set(team) ||
+ if (!team->en_port_count || !team_is_mode_set(team) ||
!team->mode->ops->receive)
team->ops.receive = team_dummy_receive;
else
team->ops.receive = team->mode->ops->receive;
}
-static void team_adjust_ops(struct team *team)
-{
- __team_adjust_ops(team, team->en_port_count);
-}
-
/*
* We can benefit from the fact that it's ensured no port is present
* at the time of mode change. Therefore no packets are in fly so there's no
@@ -614,6 +622,86 @@ static int team_change_mode(struct team *team, const char *kind)
}
+/*********************
+ * Peers notification
+ *********************/
+
+static void team_notify_peers_work(struct work_struct *work)
+{
+ struct team *team;
+
+ team = container_of(work, struct team, notify_peers.dw.work);
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&team->notify_peers.dw, 0);
+ return;
+ }
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+ rtnl_unlock();
+ if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+ schedule_delayed_work(&team->notify_peers.dw,
+ msecs_to_jiffies(team->notify_peers.interval));
+}
+
+static void team_notify_peers(struct team *team)
+{
+ if (!team->notify_peers.count || !netif_running(team->dev))
+ return;
+ atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+ schedule_delayed_work(&team->notify_peers.dw, 0);
+}
+
+static void team_notify_peers_init(struct team *team)
+{
+ INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
+}
+
+static void team_notify_peers_fini(struct team *team)
+{
+ cancel_delayed_work_sync(&team->notify_peers.dw);
+}
+
+
+/*******************************
+ * Send multicast group rejoins
+ *******************************/
+
+static void team_mcast_rejoin_work(struct work_struct *work)
+{
+ struct team *team;
+
+ team = container_of(work, struct team, mcast_rejoin.dw.work);
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+ return;
+ }
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+ rtnl_unlock();
+ if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+ schedule_delayed_work(&team->mcast_rejoin.dw,
+ msecs_to_jiffies(team->mcast_rejoin.interval));
+}
+
+static void team_mcast_rejoin(struct team *team)
+{
+ if (!team->mcast_rejoin.count || !netif_running(team->dev))
+ return;
+ atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
+ schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+}
+
+static void team_mcast_rejoin_init(struct team *team)
+{
+ INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
+}
+
+static void team_mcast_rejoin_fini(struct team *team)
+{
+ cancel_delayed_work_sync(&team->mcast_rejoin.dw);
+}
+
+
/************************
* Rx path frame handler
************************/
@@ -712,9 +800,9 @@ static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
static void __team_queue_override_port_del(struct team *team,
struct team_port *port)
{
+ if (!port->queue_id)
+ return;
list_del_rcu(&port->qom_list);
- synchronize_rcu();
- INIT_LIST_HEAD(&port->qom_list);
}
static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
@@ -736,9 +824,8 @@ static void __team_queue_override_port_add(struct team *team,
struct list_head *qom_list;
struct list_head *node;
- if (!port->queue_id || !team_port_enabled(port))
+ if (!port->queue_id)
return;
-
qom_list = __team_get_qom_list(team, port->queue_id);
node = qom_list;
list_for_each_entry(cur, qom_list, qom_list) {
@@ -755,7 +842,7 @@ static void __team_queue_override_enabled_check(struct team *team)
bool enabled = false;
list_for_each_entry(port, &team->port_list, list) {
- if (!list_empty(&port->qom_list)) {
+ if (port->queue_id) {
enabled = true;
break;
}
@@ -767,14 +854,44 @@ static void __team_queue_override_enabled_check(struct team *team)
team->queue_override_enabled = enabled;
}
-static void team_queue_override_port_refresh(struct team *team,
- struct team_port *port)
+static void team_queue_override_port_prio_changed(struct team *team,
+ struct team_port *port)
{
+ if (!port->queue_id || team_port_enabled(port))
+ return;
__team_queue_override_port_del(team, port);
__team_queue_override_port_add(team, port);
__team_queue_override_enabled_check(team);
}
+static void team_queue_override_port_change_queue_id(struct team *team,
+ struct team_port *port,
+ u16 new_queue_id)
+{
+ if (team_port_enabled(port)) {
+ __team_queue_override_port_del(team, port);
+ port->queue_id = new_queue_id;
+ __team_queue_override_port_add(team, port);
+ __team_queue_override_enabled_check(team);
+ } else {
+ port->queue_id = new_queue_id;
+ }
+}
+
+static void team_queue_override_port_add(struct team *team,
+ struct team_port *port)
+{
+ __team_queue_override_port_add(team, port);
+ __team_queue_override_enabled_check(team);
+}
+
+static void team_queue_override_port_del(struct team *team,
+ struct team_port *port)
+{
+ __team_queue_override_port_del(team, port);
+ __team_queue_override_enabled_check(team);
+}
+
/****************
* Port handling
@@ -806,9 +923,11 @@ static void team_port_enable(struct team *team,
hlist_add_head_rcu(&port->hlist,
team_port_index_hash(team, port->index));
team_adjust_ops(team);
- team_queue_override_port_refresh(team, port);
+ team_queue_override_port_add(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
+ team_notify_peers(team);
+ team_mcast_rejoin(team);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -835,14 +954,11 @@ static void team_port_disable(struct team *team,
hlist_del_rcu(&port->hlist);
__reconstruct_port_hlist(team, port->index);
port->index = -1;
- team_queue_override_port_refresh(team, port);
- __team_adjust_ops(team, team->en_port_count - 1);
- /*
- * Wait until readers see adjusted ops. This ensures that
- * readers never see team->en_port_count == 0
- */
- synchronize_rcu();
team->en_port_count--;
+ team_queue_override_port_del(team, port);
+ team_adjust_ops(team);
+ team_notify_peers(team);
+ team_mcast_rejoin(team);
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -852,7 +968,7 @@ static void team_port_disable(struct team *team,
static void __team_compute_features(struct team *team)
{
struct team_port *port;
- u32 vlan_features = TEAM_VLAN_FEATURES;
+ u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
@@ -915,17 +1031,19 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
- gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
struct netpoll *np;
int err;
- np = kzalloc(sizeof(*np), gfp);
+ if (!team->dev->npinfo)
+ return 0;
+
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
- err = __netpoll_setup(np, port->dev, gfp);
+ err = __netpoll_setup(np, port->dev);
if (err) {
kfree(np);
return err;
@@ -947,25 +1065,14 @@ static void team_port_disable_netpoll(struct team_port *port)
__netpoll_cleanup(np);
kfree(np);
}
-
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
- return team->dev->npinfo;
-}
-
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
- gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
- return NULL;
-}
#endif
static void __team_port_change_port_added(struct team_port *port, bool linkup);
@@ -1047,13 +1154,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- if (team_netpoll_info(team)) {
- err = team_port_enable_netpoll(team, port, GFP_KERNEL);
- if (err) {
- netdev_err(dev, "Failed to enable netpoll on device %s\n",
- portname);
- goto err_enable_netpoll;
- }
+ err = team_port_enable_netpoll(team, port);
+ if (err) {
+ netdev_err(dev, "Failed to enable netpoll on device %s\n",
+ portname);
+ goto err_enable_netpoll;
}
err = netdev_master_upper_dev_link(port_dev, dev);
@@ -1079,8 +1184,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
}
port->index = -1;
- team_port_enable(team, port);
list_add_tail_rcu(&port->list, &team->port_list);
+ team_port_enable(team, port);
__team_compute_features(team);
__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
__team_options_change_check(team);
@@ -1138,6 +1243,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
netdev_upper_dev_unlink(port_dev, dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
dev_close(port_dev);
team_port_leave(team, port);
@@ -1148,8 +1255,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
team_port_set_orig_dev_addr(port);
dev_set_mtu(port_dev, port->orig.mtu);
- synchronize_rcu();
- kfree(port);
+ kfree_rcu(port, rcu);
netdev_info(dev, "Port device %s removed\n", portname);
__team_compute_features(team);
@@ -1172,6 +1278,62 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
return team_change_mode(team, ctx->data.str_val);
}
+static int team_notify_peers_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->notify_peers.count;
+ return 0;
+}
+
+static int team_notify_peers_count_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->notify_peers.count = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_notify_peers_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->notify_peers.interval;
+ return 0;
+}
+
+static int team_notify_peers_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->notify_peers.interval = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_mcast_rejoin_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->mcast_rejoin.count;
+ return 0;
+}
+
+static int team_mcast_rejoin_count_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->mcast_rejoin.count = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_mcast_rejoin_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->mcast_rejoin.interval;
+ return 0;
+}
+
+static int team_mcast_rejoin_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->mcast_rejoin.interval = ctx->data.u32_val;
+ return 0;
+}
+
static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
@@ -1202,6 +1364,8 @@ static int team_user_linkup_option_get(struct team *team,
return 0;
}
+static void __team_carrier_check(struct team *team);
+
static int team_user_linkup_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
@@ -1209,6 +1373,7 @@ static int team_user_linkup_option_set(struct team *team,
port->user.linkup = ctx->data.bool_val;
team_refresh_port_linkup(port);
+ __team_carrier_check(port->team);
return 0;
}
@@ -1228,6 +1393,7 @@ static int team_user_linkup_en_option_set(struct team *team,
port->user.linkup_enabled = ctx->data.bool_val;
team_refresh_port_linkup(port);
+ __team_carrier_check(port->team);
return 0;
}
@@ -1244,9 +1410,12 @@ static int team_priority_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
+ s32 priority = ctx->data.s32_val;
- port->priority = ctx->data.s32_val;
- team_queue_override_port_refresh(team, port);
+ if (port->priority == priority)
+ return 0;
+ port->priority = priority;
+ team_queue_override_port_prio_changed(team, port);
return 0;
}
@@ -1263,17 +1432,16 @@ static int team_queue_id_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
+ u16 new_queue_id = ctx->data.u32_val;
- if (port->queue_id == ctx->data.u32_val)
+ if (port->queue_id == new_queue_id)
return 0;
- if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+ if (new_queue_id >= team->dev->real_num_tx_queues)
return -EINVAL;
- port->queue_id = ctx->data.u32_val;
- team_queue_override_port_refresh(team, port);
+ team_queue_override_port_change_queue_id(team, port, new_queue_id);
return 0;
}
-
static const struct team_option team_options[] = {
{
.name = "mode",
@@ -1282,6 +1450,30 @@ static const struct team_option team_options[] = {
.setter = team_mode_option_set,
},
{
+ .name = "notify_peers_count",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_notify_peers_count_get,
+ .setter = team_notify_peers_count_set,
+ },
+ {
+ .name = "notify_peers_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_notify_peers_interval_get,
+ .setter = team_notify_peers_interval_set,
+ },
+ {
+ .name = "mcast_rejoin_count",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_mcast_rejoin_count_get,
+ .setter = team_mcast_rejoin_count_set,
+ },
+ {
+ .name = "mcast_rejoin_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_mcast_rejoin_interval_get,
+ .setter = team_mcast_rejoin_interval_set,
+ },
+ {
.name = "enabled",
.type = TEAM_OPTION_TYPE_BOOL,
.per_port = true,
@@ -1346,7 +1538,7 @@ static int team_init(struct net_device *dev)
mutex_init(&team->lock);
team_set_no_mode(team);
- team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
if (!team->pcpu_stats)
return -ENOMEM;
@@ -1361,6 +1553,10 @@ static int team_init(struct net_device *dev)
INIT_LIST_HEAD(&team->option_list);
INIT_LIST_HEAD(&team->option_inst_list);
+
+ team_notify_peers_init(team);
+ team_mcast_rejoin_init(team);
+
err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
if (err)
goto err_options_register;
@@ -1371,6 +1567,8 @@ static int team_init(struct net_device *dev)
return 0;
err_options_register:
+ team_mcast_rejoin_fini(team);
+ team_notify_peers_fini(team);
team_queue_override_fini(team);
err_team_queue_override_init:
free_percpu(team->pcpu_stats);
@@ -1390,6 +1588,8 @@ static void team_uninit(struct net_device *dev)
__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ team_mcast_rejoin_fini(team);
+ team_notify_peers_fini(team);
team_queue_override_fini(team);
mutex_unlock(&team->lock);
}
@@ -1439,7 +1639,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
@@ -1489,8 +1690,8 @@ static void team_set_rx_mode(struct net_device *dev)
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list) {
- dev_uc_sync(port->dev, dev);
- dev_mc_sync(port->dev, dev);
+ dev_uc_sync_multiple(port->dev, dev);
+ dev_mc_sync_multiple(port->dev, dev);
}
rcu_read_unlock();
}
@@ -1523,6 +1724,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
* to traverse list in reverse under rcu_read_lock
*/
mutex_lock(&team->lock);
+ team->port_mtu_change_allowed = true;
list_for_each_entry(port, &team->port_list, list) {
err = dev_set_mtu(port->dev, new_mtu);
if (err) {
@@ -1531,6 +1733,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
goto unwind;
}
}
+ team->port_mtu_change_allowed = false;
mutex_unlock(&team->lock);
dev->mtu = new_mtu;
@@ -1540,6 +1743,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
unwind:
list_for_each_entry_continue_reverse(port, &team->port_list, list)
dev_set_mtu(port->dev, dev->mtu);
+ team->port_mtu_change_allowed = false;
mutex_unlock(&team->lock);
return err;
@@ -1558,13 +1762,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
for_each_possible_cpu(i) {
p = per_cpu_ptr(team->pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
rx_multicast = p->rx_multicast;
tx_packets = p->tx_packets;
tx_bytes = p->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -1583,7 +1787,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
return stats;
}
-static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
+static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
@@ -1595,7 +1799,7 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
*/
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = vlan_vid_add(port->dev, vid);
+ err = vlan_vid_add(port->dev, proto, vid);
if (err)
goto unwind;
}
@@ -1605,20 +1809,20 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
unwind:
list_for_each_entry_continue_reverse(port, &team->port_list, list)
- vlan_vid_del(port->dev, vid);
+ vlan_vid_del(port->dev, proto, vid);
mutex_unlock(&team->lock);
return err;
}
-static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
+static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
- vlan_vid_del(port->dev, vid);
+ vlan_vid_del(port->dev, proto, vid);
rcu_read_unlock();
return 0;
@@ -1647,7 +1851,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
}
static int team_netpoll_setup(struct net_device *dev,
- struct netpoll_info *npifo, gfp_t gfp)
+ struct netpoll_info *npifo)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
@@ -1655,7 +1859,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port, gfp);
+ err = team_port_enable_netpoll(team, port);
if (err) {
__team_netpoll_cleanup(team);
break;
@@ -1776,7 +1980,7 @@ static void team_setup_by_port(struct net_device *dev,
dev->addr_len = port_dev->addr_len;
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
- memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+ eth_hw_addr_inherit(dev, port_dev);
}
static int team_dev_type_check_change(struct net_device *dev,
@@ -1825,10 +2029,14 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO;
+
+ /* Don't allow team devices to change network namespaces. */
+ dev->features |= NETIF_F_NETNS_LOCAL;
+
dev->hw_features = TEAM_VLAN_FEATURES |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
dev->features |= dev->hw_features;
@@ -2359,7 +2567,8 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
bool incomplete;
int i;
- port = list_first_entry(&team->port_list, struct team_port, list);
+ port = list_first_entry_or_null(&team->port_list,
+ struct team_port, list);
start_again:
err = __send_and_alloc_skb(&skb, team, portid, send_func);
@@ -2387,8 +2596,8 @@ start_again:
err = team_nl_fill_one_port_get(skb, one_port);
if (err)
goto errout;
- } else {
- list_for_each_entry(port, &team->port_list, list) {
+ } else if (port) {
+ list_for_each_entry_from(port, &team->port_list, list) {
err = team_nl_fill_one_port_get(skb, port);
if (err) {
if (err == -EMSGSIZE) {
@@ -2445,7 +2654,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
return err;
}
-static struct genl_ops team_nl_ops[] = {
+static const struct genl_ops team_nl_ops[] = {
{
.cmd = TEAM_CMD_NOOP,
.doit = team_nl_cmd_noop,
@@ -2471,15 +2680,15 @@ static struct genl_ops team_nl_ops[] = {
},
};
-static struct genl_multicast_group team_change_event_mcgrp = {
- .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+static const struct genl_multicast_group team_nl_mcgrps[] = {
+ { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};
static int team_nl_send_multicast(struct sk_buff *skb,
struct team *team, u32 portid)
{
- return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
- team_change_event_mcgrp.id, GFP_KERNEL);
+ return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
+ skb, 0, 0, GFP_KERNEL);
}
static int team_nl_send_event_options_get(struct team *team,
@@ -2498,23 +2707,8 @@ static int team_nl_send_event_port_get(struct team *team,
static int team_nl_init(void)
{
- int err;
-
- err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
- ARRAY_SIZE(team_nl_ops));
- if (err)
- return err;
-
- err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
- if (err)
- goto err_change_event_grp_reg;
-
- return 0;
-
-err_change_event_grp_reg:
- genl_unregister_family(&team_nl_family);
-
- return err;
+ return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
+ team_nl_mcgrps);
}
static void team_nl_fini(void)
@@ -2632,7 +2826,7 @@ static void team_port_change_check(struct team_port *port, bool linkup)
static int team_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *dev = (struct net_device *) ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct team_port *port;
port = team_port_get_rtnl(dev);
@@ -2643,8 +2837,10 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_UP:
if (netif_carrier_ok(dev))
team_port_change_check(port, true);
+ break;
case NETDEV_DOWN:
team_port_change_check(port, false);
+ break;
case NETDEV_CHANGE:
if (netif_running(port->dev))
team_port_change_check(port,
@@ -2656,12 +2852,18 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
team_compute_features(port->team);
break;
- case NETDEV_CHANGEMTU:
+ case NETDEV_PRECHANGEMTU:
/* Forbid to change mtu of underlaying device */
- return NOTIFY_BAD;
+ if (!port->team->port_mtu_change_allowed)
+ return NOTIFY_BAD;
+ break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid to change type of underlaying device */
return NOTIFY_BAD;
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+ call_netdevice_notifiers(event, port->team->dev);
+ break;
}
return NOTIFY_DONE;
}