diff options
Diffstat (limited to 'drivers/net/team')
-rw-r--r--  drivers/net/team/team.c                  | 75
-rw-r--r--  drivers/net/team/team_mode_loadbalance.c | 25
-rw-r--r--  drivers/net/team/team_mode_random.c      |  8
3 files changed, 54 insertions, 54 deletions
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 50e43e64d51..b4958c7ffa8 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -968,7 +968,7 @@ static void team_port_disable(struct team *team,  static void __team_compute_features(struct team *team)  {  	struct team_port *port; -	u32 vlan_features = TEAM_VLAN_FEATURES; +	u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;  	unsigned short max_hard_header_len = ETH_HLEN;  	unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; @@ -1031,8 +1031,7 @@ static void team_port_leave(struct team *team, struct team_port *port)  }  #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port, -				    gfp_t gfp) +static int team_port_enable_netpoll(struct team *team, struct team_port *port)  {  	struct netpoll *np;  	int err; @@ -1040,11 +1039,11 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,  	if (!team->dev->npinfo)  		return 0; -	np = kzalloc(sizeof(*np), gfp); +	np = kzalloc(sizeof(*np), GFP_KERNEL);  	if (!np)  		return -ENOMEM; -	err = __netpoll_setup(np, port->dev, gfp); +	err = __netpoll_setup(np, port->dev);  	if (err) {  		kfree(np);  		return err; @@ -1067,8 +1066,7 @@ static void team_port_disable_netpoll(struct team_port *port)  	kfree(np);  }  #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port, -				    gfp_t gfp) +static int team_port_enable_netpoll(struct team *team, struct team_port *port)  {  	return 0;  } @@ -1156,7 +1154,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)  		goto err_vids_add;  	} -	err = team_port_enable_netpoll(team, port, GFP_KERNEL); +	err = team_port_enable_netpoll(team, port);  	if (err) {  		netdev_err(dev, "Failed to enable netpoll on device %s\n",  			   portname); @@ -1366,6 +1364,8 @@ static int team_user_linkup_option_get(struct team *team,  	return 0;  } +static void 
__team_carrier_check(struct team *team); +  static int team_user_linkup_option_set(struct team *team,  				       struct team_gsetter_ctx *ctx)  { @@ -1373,6 +1373,7 @@ static int team_user_linkup_option_set(struct team *team,  	port->user.linkup = ctx->data.bool_val;  	team_refresh_port_linkup(port); +	__team_carrier_check(port->team);  	return 0;  } @@ -1392,6 +1393,7 @@ static int team_user_linkup_en_option_set(struct team *team,  	port->user.linkup_enabled = ctx->data.bool_val;  	team_refresh_port_linkup(port); +	__team_carrier_check(port->team);  	return 0;  } @@ -1536,7 +1538,7 @@ static int team_init(struct net_device *dev)  	mutex_init(&team->lock);  	team_set_no_mode(team); -	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats); +	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);  	if (!team->pcpu_stats)  		return -ENOMEM; @@ -1637,7 +1639,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)  	return NETDEV_TX_OK;  } -static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, +			     void *accel_priv, select_queue_fallback_t fallback)  {  	/*  	 * This helper function exists to help dev_pick_tx get the correct @@ -1721,6 +1724,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)  	 * to traverse list in reverse under rcu_read_lock  	 */  	mutex_lock(&team->lock); +	team->port_mtu_change_allowed = true;  	list_for_each_entry(port, &team->port_list, list) {  		err = dev_set_mtu(port->dev, new_mtu);  		if (err) { @@ -1729,6 +1733,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)  			goto unwind;  		}  	} +	team->port_mtu_change_allowed = false;  	mutex_unlock(&team->lock);  	dev->mtu = new_mtu; @@ -1738,6 +1743,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)  unwind:  	list_for_each_entry_continue_reverse(port, &team->port_list, list)  		dev_set_mtu(port->dev, 
dev->mtu); +	team->port_mtu_change_allowed = false;  	mutex_unlock(&team->lock);  	return err; @@ -1756,13 +1762,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)  	for_each_possible_cpu(i) {  		p = per_cpu_ptr(team->pcpu_stats, i);  		do { -			start = u64_stats_fetch_begin_bh(&p->syncp); +			start = u64_stats_fetch_begin_irq(&p->syncp);  			rx_packets	= p->rx_packets;  			rx_bytes	= p->rx_bytes;  			rx_multicast	= p->rx_multicast;  			tx_packets	= p->tx_packets;  			tx_bytes	= p->tx_bytes; -		} while (u64_stats_fetch_retry_bh(&p->syncp, start)); +		} while (u64_stats_fetch_retry_irq(&p->syncp, start));  		stats->rx_packets	+= rx_packets;  		stats->rx_bytes		+= rx_bytes; @@ -1845,7 +1851,7 @@ static void team_netpoll_cleanup(struct net_device *dev)  }  static int team_netpoll_setup(struct net_device *dev, -			      struct netpoll_info *npifo, gfp_t gfp) +			      struct netpoll_info *npifo)  {  	struct team *team = netdev_priv(dev);  	struct team_port *port; @@ -1853,7 +1859,7 @@ static int team_netpoll_setup(struct net_device *dev,  	mutex_lock(&team->lock);  	list_for_each_entry(port, &team->port_list, list) { -		err = team_port_enable_netpoll(team, port, gfp); +		err = team_port_enable_netpoll(team, port);  		if (err) {  			__team_netpoll_cleanup(team);  			break; @@ -2023,6 +2029,10 @@ static void team_setup(struct net_device *dev)  	dev->features |= NETIF_F_LLTX;  	dev->features |= NETIF_F_GRO; + +	/* Don't allow team devices to change network namespaces. 
*/ +	dev->features |= NETIF_F_NETNS_LOCAL; +  	dev->hw_features = TEAM_VLAN_FEATURES |  			   NETIF_F_HW_VLAN_CTAG_TX |  			   NETIF_F_HW_VLAN_CTAG_RX | @@ -2644,7 +2654,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,  	return err;  } -static struct genl_ops team_nl_ops[] = { +static const struct genl_ops team_nl_ops[] = {  	{  		.cmd = TEAM_CMD_NOOP,  		.doit = team_nl_cmd_noop, @@ -2670,15 +2680,15 @@ static struct genl_ops team_nl_ops[] = {  	},  }; -static struct genl_multicast_group team_change_event_mcgrp = { -	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, +static const struct genl_multicast_group team_nl_mcgrps[] = { +	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },  };  static int team_nl_send_multicast(struct sk_buff *skb,  				  struct team *team, u32 portid)  { -	return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, -				       team_change_event_mcgrp.id, GFP_KERNEL); +	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev), +				       skb, 0, 0, GFP_KERNEL);  }  static int team_nl_send_event_options_get(struct team *team, @@ -2697,23 +2707,8 @@ static int team_nl_send_event_port_get(struct team *team,  static int team_nl_init(void)  { -	int err; - -	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, -					    ARRAY_SIZE(team_nl_ops)); -	if (err) -		return err; - -	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); -	if (err) -		goto err_change_event_grp_reg; - -	return 0; - -err_change_event_grp_reg: -	genl_unregister_family(&team_nl_family); - -	return err; +	return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops, +						    team_nl_mcgrps);  }  static void team_nl_fini(void) @@ -2842,8 +2837,10 @@ static int team_device_event(struct notifier_block *unused,  	case NETDEV_UP:  		if (netif_carrier_ok(dev))  			team_port_change_check(port, true); +		break;  	case NETDEV_DOWN:  		team_port_change_check(port, false); +		break;  	case NETDEV_CHANGE:  		if 
(netif_running(port->dev))  			team_port_change_check(port, @@ -2855,9 +2852,11 @@ static int team_device_event(struct notifier_block *unused,  	case NETDEV_FEAT_CHANGE:  		team_compute_features(port->team);  		break; -	case NETDEV_CHANGEMTU: +	case NETDEV_PRECHANGEMTU:  		/* Forbid to change mtu of underlaying device */ -		return NOTIFY_BAD; +		if (!port->team->port_mtu_change_allowed) +			return NOTIFY_BAD; +		break;  	case NETDEV_PRE_TYPE_CHANGE:  		/* Forbid to change type of underlaying device */  		return NOTIFY_BAD; diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index 829a9cd2b4d..a58dfebb551 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -49,7 +49,7 @@ struct lb_port_mapping {  struct lb_priv_ex {  	struct team *team;  	struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE]; -	struct sock_fprog *orig_fprog; +	struct sock_fprog_kern *orig_fprog;  	struct {  		unsigned int refresh_interval; /* in tenths of second */  		struct delayed_work refresh_dw; @@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)  	return 0;  } -static int __fprog_create(struct sock_fprog **pfprog, u32 data_len, +static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,  			  const void *data)  { -	struct sock_fprog *fprog; +	struct sock_fprog_kern *fprog;  	struct sock_filter *filter = (struct sock_filter *) data;  	if (data_len % sizeof(struct sock_filter))  		return -EINVAL; -	fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL); +	fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);  	if (!fprog)  		return -ENOMEM;  	fprog->filter = kmemdup(filter, data_len, GFP_KERNEL); @@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,  	return 0;  } -static void __fprog_destroy(struct sock_fprog *fprog) +static void __fprog_destroy(struct sock_fprog_kern *fprog)  {  	kfree(fprog->filter); 
 	kfree(fprog); @@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)  	struct lb_priv *lb_priv = get_lb_priv(team);  	struct sk_filter *fp = NULL;  	struct sk_filter *orig_fp; -	struct sock_fprog *fprog = NULL; +	struct sock_fprog_kern *fprog = NULL;  	int err;  	if (ctx->data.bin_val.len) { @@ -432,9 +432,9 @@ static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,  	struct lb_stats tmp;  	do { -		start = u64_stats_fetch_begin_bh(syncp); +		start = u64_stats_fetch_begin_irq(syncp);  		tmp.tx_bytes = cpu_stats->tx_bytes; -	} while (u64_stats_fetch_retry_bh(syncp, start)); +	} while (u64_stats_fetch_retry_irq(syncp, start));  	acc_stats->tx_bytes += tmp.tx_bytes;  } @@ -570,7 +570,7 @@ static int lb_init(struct team *team)  {  	struct lb_priv *lb_priv = get_lb_priv(team);  	lb_select_tx_port_func_t *func; -	int err; +	int i, err;  	/* set default tx port selector */  	func = lb_select_tx_port_get_func("hash"); @@ -588,6 +588,13 @@ static int lb_init(struct team *team)  		goto err_alloc_pcpu_stats;  	} +	for_each_possible_cpu(i) { +		struct lb_pcpu_stats *team_lb_stats; +		team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); +		u64_stats_init(&team_lb_stats->syncp); +	} + +  	INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);  	err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options)); diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c index 7f032e21134..cd2f692b807 100644 --- a/drivers/net/team/team_mode_random.c +++ b/drivers/net/team/team_mode_random.c @@ -13,20 +13,14 @@  #include <linux/module.h>  #include <linux/init.h>  #include <linux/skbuff.h> -#include <linux/reciprocal_div.h>  #include <linux/if_team.h> -static u32 random_N(unsigned int N) -{ -	return reciprocal_divide(prandom_u32(), N); -} -  static bool rnd_transmit(struct team *team, struct sk_buff *skb)  {  	struct team_port *port;  	int port_index; -	port_index = 
random_N(team->en_port_count); +	port_index = prandom_u32_max(team->en_port_count);  	port = team_get_port_by_index_rcu(team, port_index);  	if (unlikely(!port))  		goto drop;  | 
