Diffstat (limited to 'net/8021q')
-rw-r--r--  net/8021q/Kconfig        |  23
-rw-r--r--  net/8021q/Makefile       |   1
-rw-r--r--  net/8021q/vlan.c         | 344
-rw-r--r--  net/8021q/vlan.h         | 180
-rw-r--r--  net/8021q/vlan_core.c    | 407
-rw-r--r--  net/8021q/vlan_dev.c     | 575
-rw-r--r--  net/8021q/vlan_gvrp.c    |   8
-rw-r--r--  net/8021q/vlan_mvrp.c    |  76
-rw-r--r--  net/8021q/vlan_netlink.c |  59
-rw-r--r--  net/8021q/vlanproc.c     |  59
10 files changed, 1049 insertions, 683 deletions
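The diff below replaces the old vlan_group/vlan_skb_recv receive path with a per-protocol vlan_info/vlan_vid registry, renames vlan_dev_info to vlan_dev_priv, and adds 802.1ad and MVRP support. As a rough illustration of the refcounted vlan_vid_add()/vlan_vid_del() API exported in vlan_core.c (a sketch only, not part of the commit; the helper name example_enable_vid is made up), a caller holding RTNL could register one VID for both tag protocols like this:

	#include <linux/if_ether.h>
	#include <linux/if_vlan.h>
	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	/* Hypothetical helper: record a VID on real_dev for both 802.1Q and
	 * 802.1ad, letting the core push it to the hardware filter when the
	 * device advertises the matching NETIF_F_HW_VLAN_*_FILTER feature.
	 * Caller must hold RTNL, as the ASSERT_RTNL() checks in the patch require.
	 */
	static int example_enable_vid(struct net_device *real_dev, u16 vid)
	{
		int err;

		ASSERT_RTNL();

		err = vlan_vid_add(real_dev, htons(ETH_P_8021Q), vid);
		if (err)
			return err;

		err = vlan_vid_add(real_dev, htons(ETH_P_8021AD), vid);
		if (err)
			vlan_vid_del(real_dev, htons(ETH_P_8021Q), vid);

		return err;
	}

The teardown path would mirror this with two vlan_vid_del() calls; once the last VID on a device is dropped, the patch frees the vlan_info after an RCU grace period.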
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig index fa073a54963..42320180967 100644 --- a/net/8021q/Kconfig +++ b/net/8021q/Kconfig @@ -3,14 +3,14 @@  #  config VLAN_8021Q -	tristate "802.1Q VLAN Support" +	tristate "802.1Q/802.1ad VLAN Support"  	---help---  	  Select this and you will be able to create 802.1Q VLAN interfaces -	  on your ethernet interfaces.  802.1Q VLAN supports almost -	  everything a regular ethernet interface does, including -	  firewalling, bridging, and of course IP traffic.  You will need -	  the 'vconfig' tool from the VLAN project in order to effectively -	  use VLANs.  See the VLAN web page for more information: +	  on your Ethernet interfaces. 802.1Q VLAN supports almost +	  everything a regular Ethernet interface does, including +	  firewalling, bridging, and of course IP traffic. You will need +	  the 'ip' utility in order to effectively use VLANs. +	  See the VLAN web page for more information:  	  <http://www.candelatech.com/~greear/vlan.html>  	  To compile this code as a module, choose M here: the module @@ -27,3 +27,14 @@ config VLAN_8021Q_GVRP  	  automatic propagation of registered VLANs to switches.  	  If unsure, say N. + +config VLAN_8021Q_MVRP +	bool "MVRP (Multiple VLAN Registration Protocol) support" +	depends on VLAN_8021Q +	select MRP +	help +	  Select this to enable MVRP end-system support. MVRP is used for +	  automatic propagation of registered VLANs to switches; it +	  supersedes GVRP and is not backwards-compatible. + +	  If unsure, say N. diff --git a/net/8021q/Makefile b/net/8021q/Makefile index 9f4f174ead1..7bc8db08d7e 100644 --- a/net/8021q/Makefile +++ b/net/8021q/Makefile @@ -6,5 +6,6 @@ obj-$(CONFIG_VLAN_8021Q)		+= 8021q.o  8021q-y					:= vlan.o vlan_dev.o vlan_netlink.o  8021q-$(CONFIG_VLAN_8021Q_GVRP)		+= vlan_gvrp.o +8021q-$(CONFIG_VLAN_8021Q_MVRP)		+= vlan_mvrp.o  8021q-$(CONFIG_PROC_FS)			+= vlanproc.o diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 6e64f7c6a2e..44ebd5c2cd4 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -18,6 +18,8 @@   *		2 of the License, or (at your option) any later version.   */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +  #include <linux/capability.h>  #include <linux/module.h>  #include <linux/netdevice.h> @@ -46,45 +48,21 @@ int vlan_net_id __read_mostly;  const char vlan_fullname[] = "802.1Q VLAN Support";  const char vlan_version[] = DRV_VERSION; -static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>"; -static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>"; - -static struct packet_type vlan_packet_type __read_mostly = { -	.type = cpu_to_be16(ETH_P_8021Q), -	.func = vlan_skb_recv, /* VLAN receive method */ -};  /* End of global variables definitions. 
*/ -static void vlan_group_free(struct vlan_group *grp) -{ -	int i; - -	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) -		kfree(grp->vlan_devices_arrays[i]); -	kfree(grp); -} - -static struct vlan_group *vlan_group_alloc(struct net_device *real_dev) -{ -	struct vlan_group *grp; - -	grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL); -	if (!grp) -		return NULL; - -	grp->real_dev = real_dev; -	return grp; -} - -static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id) +static int vlan_group_prealloc_vid(struct vlan_group *vg, +				   __be16 vlan_proto, u16 vlan_id)  {  	struct net_device **array; +	unsigned int pidx, vidx;  	unsigned int size;  	ASSERT_RTNL(); -	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; +	pidx  = vlan_proto_idx(vlan_proto); +	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN; +	array = vg->vlan_devices_arrays[pidx][vidx];  	if (array != NULL)  		return 0; @@ -93,76 +71,68 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)  	if (array == NULL)  		return -ENOBUFS; -	vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array; +	vg->vlan_devices_arrays[pidx][vidx] = array;  	return 0;  } -static void vlan_rcu_free(struct rcu_head *rcu) -{ -	vlan_group_free(container_of(rcu, struct vlan_group, rcu)); -} -  void unregister_vlan_dev(struct net_device *dev, struct list_head *head)  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct net_device *real_dev = vlan->real_dev; -	const struct net_device_ops *ops = real_dev->netdev_ops; +	struct vlan_info *vlan_info;  	struct vlan_group *grp;  	u16 vlan_id = vlan->vlan_id;  	ASSERT_RTNL(); -	grp = rtnl_dereference(real_dev->vlgrp); -	BUG_ON(!grp); +	vlan_info = rtnl_dereference(real_dev->vlan_info); +	BUG_ON(!vlan_info); -	/* Take it out of our own structures, but be sure to interlock with -	 * HW accelerating devices or SW vlan input packet processing if -	 * VLAN is not 0 (leave it there for 802.1p). -	 */ -	if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER)) -		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id); +	grp = &vlan_info->grp; + +	grp->nr_vlan_devs--; -	grp->nr_vlans--; +	if (vlan->flags & VLAN_FLAG_MVRP) +		vlan_mvrp_request_leave(dev); +	if (vlan->flags & VLAN_FLAG_GVRP) +		vlan_gvrp_request_leave(dev); -	vlan_group_set_device(grp, vlan_id, NULL); -	if (!grp->killall) -		synchronize_net(); +	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL); +	netdev_upper_dev_unlink(real_dev, dev); +	/* Because unregister_netdevice_queue() makes sure at least one rcu +	 * grace period is respected before device freeing, +	 * we dont need to call synchronize_net() here. +	 */  	unregister_netdevice_queue(dev, head); -	/* If the group is now empty, kill off the group. */ -	if (grp->nr_vlans == 0) { +	if (grp->nr_vlan_devs == 0) { +		vlan_mvrp_uninit_applicant(real_dev);  		vlan_gvrp_uninit_applicant(real_dev); - -		rcu_assign_pointer(real_dev->vlgrp, NULL); -		if (ops->ndo_vlan_rx_register) -			ops->ndo_vlan_rx_register(real_dev, NULL); - -		/* Free the group, after all cpu's are done. */ -		call_rcu(&grp->rcu, vlan_rcu_free);  	} +	/* Take it out of our own structures, but be sure to interlock with +	 * HW accelerating devices or SW vlan input packet processing if +	 * VLAN is not 0 (leave it there for 802.1p). 
+	 */ +	if (vlan_id) +		vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); +  	/* Get rid of the vlan's reference to real_dev */  	dev_put(real_dev);  } -int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id) +int vlan_check_real_dev(struct net_device *real_dev, +			__be16 protocol, u16 vlan_id)  {  	const char *name = real_dev->name; -	const struct net_device_ops *ops = real_dev->netdev_ops;  	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) { -		pr_info("8021q: VLANs not supported on %s\n", name); -		return -EOPNOTSUPP; -	} - -	if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) && -	    (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) { -		pr_info("8021q: Device %s has buggy VLAN hw accel\n", name); +		pr_info("VLANs not supported on %s\n", name);  		return -EOPNOTSUPP;  	} -	if (vlan_find_dev(real_dev, vlan_id) != NULL) +	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)  		return -EEXIST;  	return 0; @@ -170,32 +140,45 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)  int register_vlan_dev(struct net_device *dev)  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct net_device *real_dev = vlan->real_dev; -	const struct net_device_ops *ops = real_dev->netdev_ops;  	u16 vlan_id = vlan->vlan_id; -	struct vlan_group *grp, *ngrp = NULL; +	struct vlan_info *vlan_info; +	struct vlan_group *grp;  	int err; -	grp = rtnl_dereference(real_dev->vlgrp); -	if (!grp) { -		ngrp = grp = vlan_group_alloc(real_dev); -		if (!grp) -			return -ENOBUFS; +	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id); +	if (err) +		return err; + +	vlan_info = rtnl_dereference(real_dev->vlan_info); +	/* vlan_info should be there now. vlan_vid_add took care of it */ +	BUG_ON(!vlan_info); + +	grp = &vlan_info->grp; +	if (grp->nr_vlan_devs == 0) {  		err = vlan_gvrp_init_applicant(real_dev);  		if (err < 0) -			goto out_free_group; +			goto out_vid_del; +		err = vlan_mvrp_init_applicant(real_dev); +		if (err < 0) +			goto out_uninit_gvrp;  	} -	err = vlan_group_prealloc_vid(grp, vlan_id); +	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);  	if (err < 0) -		goto out_uninit_applicant; +		goto out_uninit_mvrp; +	vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;  	err = register_netdevice(dev);  	if (err < 0) -		goto out_uninit_applicant; +		goto out_uninit_mvrp; + +	err = netdev_upper_dev_link(real_dev, dev); +	if (err) +		goto out_unregister_netdev; -	/* Account for reference in struct vlan_dev_info */ +	/* Account for reference in struct vlan_dev_priv */  	dev_hold(real_dev);  	netif_stacked_transfer_operstate(real_dev, dev); @@ -204,27 +187,21 @@ int register_vlan_dev(struct net_device *dev)  	/* So, got the sucker initialized, now lets place  	 * it into our local structure.  	 
*/ -	vlan_group_set_device(grp, vlan_id, dev); -	grp->nr_vlans++; - -	if (ngrp) { -		if (ops->ndo_vlan_rx_register) -			ops->ndo_vlan_rx_register(real_dev, ngrp); -		rcu_assign_pointer(real_dev->vlgrp, ngrp); -	} -	if (real_dev->features & NETIF_F_HW_VLAN_FILTER) -		ops->ndo_vlan_rx_add_vid(real_dev, vlan_id); +	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev); +	grp->nr_vlan_devs++;  	return 0; -out_uninit_applicant: -	if (ngrp) +out_unregister_netdev: +	unregister_netdevice(dev); +out_uninit_mvrp: +	if (grp->nr_vlan_devs == 0) +		vlan_mvrp_uninit_applicant(real_dev); +out_uninit_gvrp: +	if (grp->nr_vlan_devs == 0)  		vlan_gvrp_uninit_applicant(real_dev); -out_free_group: -	if (ngrp) { -		/* Free the group, after all cpu's are done. */ -		call_rcu(&ngrp->rcu, vlan_rcu_free); -	} +out_vid_del: +	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);  	return err;  } @@ -234,6 +211,7 @@ out_free_group:  static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)  {  	struct net_device *new_dev; +	struct vlan_dev_priv *vlan;  	struct net *net = dev_net(real_dev);  	struct vlan_net *vn = net_generic(net, vlan_net_id);  	char name[IFNAMSIZ]; @@ -242,7 +220,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)  	if (vlan_id >= VLAN_VID_MASK)  		return -ERANGE; -	err = vlan_check_real_dev(real_dev, vlan_id); +	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);  	if (err < 0)  		return err; @@ -272,7 +250,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)  		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);  	} -	new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup); +	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup);  	if (new_dev == NULL)  		return -ENOBUFS; @@ -282,11 +260,14 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)  	 * hope the underlying device can handle it.  	 
*/  	new_dev->mtu = real_dev->mtu; +	new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT); -	vlan_dev_info(new_dev)->vlan_id = vlan_id; -	vlan_dev_info(new_dev)->real_dev = real_dev; -	vlan_dev_info(new_dev)->dent = NULL; -	vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR; +	vlan = vlan_dev_priv(new_dev); +	vlan->vlan_proto = htons(ETH_P_8021Q); +	vlan->vlan_id = vlan_id; +	vlan->real_dev = real_dev; +	vlan->dent = NULL; +	vlan->flags = VLAN_FLAG_REORDER_HDR;  	new_dev->rtnl_link_ops = &vlan_link_ops;  	err = register_vlan_dev(new_dev); @@ -303,46 +284,44 @@ out_free_newdev:  static void vlan_sync_address(struct net_device *dev,  			      struct net_device *vlandev)  { -	struct vlan_dev_info *vlan = vlan_dev_info(vlandev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);  	/* May be called without an actual change */ -	if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr)) +	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))  		return;  	/* vlan address was different from the old address and is equal to  	 * the new address */ -	if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && -	    !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) +	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && +	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))  		dev_uc_del(dev, vlandev->dev_addr);  	/* vlan address was equal to the old address and is different from  	 * the new address */ -	if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && -	    compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) +	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && +	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))  		dev_uc_add(dev, vlandev->dev_addr); -	memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); +	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);  }  static void vlan_transfer_features(struct net_device *dev,  				   struct net_device *vlandev)  { -	unsigned long old_features = vlandev->features; +	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); -	vlandev->features &= ~dev->vlan_features; -	vlandev->features |= dev->features & dev->vlan_features;  	vlandev->gso_max_size = dev->gso_max_size; -	if (dev->features & NETIF_F_HW_VLAN_TX) +	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))  		vlandev->hard_header_len = dev->hard_header_len;  	else  		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#if IS_ENABLED(CONFIG_FCOE)  	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;  #endif -	if (old_features != vlandev->features) -		netdev_features_change(vlandev); + +	netdev_update_features(vlandev);  }  static void __vlan_device_event(struct net_device *dev, unsigned long event) @@ -351,13 +330,12 @@ static void __vlan_device_event(struct net_device *dev, unsigned long event)  	case NETDEV_CHANGENAME:  		vlan_proc_rem_dev(dev);  		if (vlan_proc_add_dev(dev) < 0) -			pr_warning("8021q: failed to change proc name for %s\n", -					dev->name); +			pr_warn("failed to change proc name for %s\n", +				dev->name);  		break;  	case NETDEV_REGISTER:  		if (vlan_proc_add_dev(dev) < 0) -			pr_warning("8021q: failed to add proc entry for %s\n", -					dev->name); +			pr_warn("failed to add proc entry for %s\n", dev->name);  		break;  	case NETDEV_UNREGISTER:  		vlan_proc_rem_dev(dev); @@ -368,27 +346,29 @@ static void __vlan_device_event(struct net_device *dev, unsigned long event)  static int vlan_device_event(struct notifier_block *unused, 
unsigned long event,  			     void *ptr)  { -	struct net_device *dev = ptr; +	struct net_device *dev = netdev_notifier_info_to_dev(ptr);  	struct vlan_group *grp; +	struct vlan_info *vlan_info;  	int i, flgs;  	struct net_device *vlandev; -	struct vlan_dev_info *vlan; +	struct vlan_dev_priv *vlan; +	bool last = false;  	LIST_HEAD(list);  	if (is_vlan_dev(dev))  		__vlan_device_event(dev, event);  	if ((event == NETDEV_UP) && -	    (dev->features & NETIF_F_HW_VLAN_FILTER) && -	    dev->netdev_ops->ndo_vlan_rx_add_vid) { -		pr_info("8021q: adding VLAN 0 to HW filter on device %s\n", +	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { +		pr_info("adding VLAN 0 to HW filter on device %s\n",  			dev->name); -		dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0); +		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);  	} -	grp = rtnl_dereference(dev->vlgrp); -	if (!grp) +	vlan_info = rtnl_dereference(dev->vlan_info); +	if (!vlan_info)  		goto out; +	grp = &vlan_info->grp;  	/* It is OK that we do not hold the group lock right now,  	 * as we run under the RTNL lock. @@ -397,22 +377,13 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,  	switch (event) {  	case NETDEV_CHANGE:  		/* Propagate real device state to vlan devices */ -		for (i = 0; i < VLAN_N_VID; i++) { -			vlandev = vlan_group_get_device(grp, i); -			if (!vlandev) -				continue; - +		vlan_group_for_each_dev(grp, i, vlandev)  			netif_stacked_transfer_operstate(dev, vlandev); -		}  		break;  	case NETDEV_CHANGEADDR:  		/* Adjust unicast filters on underlying device */ -		for (i = 0; i < VLAN_N_VID; i++) { -			vlandev = vlan_group_get_device(grp, i); -			if (!vlandev) -				continue; - +		vlan_group_for_each_dev(grp, i, vlandev) {  			flgs = vlandev->flags;  			if (!(flgs & IFF_UP))  				continue; @@ -422,11 +393,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,  		break;  	case NETDEV_CHANGEMTU: -		for (i = 0; i < VLAN_N_VID; i++) { -			vlandev = vlan_group_get_device(grp, i); -			if (!vlandev) -				continue; - +		vlan_group_for_each_dev(grp, i, vlandev) {  			if (vlandev->mtu <= dev->mtu)  				continue; @@ -436,28 +403,21 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,  	case NETDEV_FEAT_CHANGE:  		/* Propagate device features to underlying device */ -		for (i = 0; i < VLAN_N_VID; i++) { -			vlandev = vlan_group_get_device(grp, i); -			if (!vlandev) -				continue; - +		vlan_group_for_each_dev(grp, i, vlandev)  			vlan_transfer_features(dev, vlandev); -		} -  		break;  	case NETDEV_DOWN: -		/* Put all VLANs for this dev in the down state too.  */ -		for (i = 0; i < VLAN_N_VID; i++) { -			vlandev = vlan_group_get_device(grp, i); -			if (!vlandev) -				continue; +		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) +			vlan_vid_del(dev, htons(ETH_P_8021Q), 0); +		/* Put all VLANs for this dev in the down state too.  */ +		vlan_group_for_each_dev(grp, i, vlandev) {  			flgs = vlandev->flags;  			if (!(flgs & IFF_UP))  				continue; -			vlan = vlan_dev_info(vlandev); +			vlan = vlan_dev_priv(vlandev);  			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))  				dev_change_flags(vlandev, flgs & ~IFF_UP);  			netif_stacked_transfer_operstate(dev, vlandev); @@ -466,16 +426,12 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,  	case NETDEV_UP:  		/* Put all VLANs for this dev in the up state too.  
*/ -		for (i = 0; i < VLAN_N_VID; i++) { -			vlandev = vlan_group_get_device(grp, i); -			if (!vlandev) -				continue; - +		vlan_group_for_each_dev(grp, i, vlandev) {  			flgs = vlandev->flags;  			if (flgs & IFF_UP)  				continue; -			vlan = vlan_dev_info(vlandev); +			vlan = vlan_dev_priv(vlandev);  			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))  				dev_change_flags(vlandev, flgs | IFF_UP);  			netif_stacked_transfer_operstate(dev, vlandev); @@ -487,27 +443,32 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,  		if (dev->reg_state != NETREG_UNREGISTERING)  			break; -		/* Delete all VLANs for this dev. */ -		grp->killall = 1; - -		for (i = 0; i < VLAN_N_VID; i++) { -			vlandev = vlan_group_get_device(grp, i); -			if (!vlandev) -				continue; - -			/* unregistration of last vlan destroys group, abort +		vlan_group_for_each_dev(grp, i, vlandev) { +			/* removal of last vid destroys vlan_info, abort  			 * afterwards */ -			if (grp->nr_vlans == 1) -				i = VLAN_N_VID; +			if (vlan_info->nr_vids == 1) +				last = true;  			unregister_vlan_dev(vlandev, &list); +			if (last) +				break;  		}  		unregister_netdevice_many(&list);  		break;  	case NETDEV_PRE_TYPE_CHANGE:  		/* Forbid underlaying device to change its type. */ -		return NOTIFY_BAD; +		if (vlan_uses_dev(dev)) +			return NOTIFY_BAD; +		break; + +	case NETDEV_NOTIFY_PEERS: +	case NETDEV_BONDING_FAILOVER: +	case NETDEV_RESEND_IGMP: +		/* Propagate to vlan devices */ +		vlan_group_for_each_dev(grp, i, vlandev) +			call_netdevice_notifiers(event, vlandev); +		break;  	}  out: @@ -559,7 +520,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)  	switch (args.cmd) {  	case SET_VLAN_INGRESS_PRIORITY_CMD:  		err = -EPERM; -		if (!capable(CAP_NET_ADMIN)) +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))  			break;  		vlan_dev_set_ingress_priority(dev,  					      args.u.skb_priority, @@ -569,7 +530,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)  	case SET_VLAN_EGRESS_PRIORITY_CMD:  		err = -EPERM; -		if (!capable(CAP_NET_ADMIN)) +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))  			break;  		err = vlan_dev_set_egress_priority(dev,  						   args.u.skb_priority, @@ -578,7 +539,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)  	case SET_VLAN_FLAG_CMD:  		err = -EPERM; -		if (!capable(CAP_NET_ADMIN)) +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))  			break;  		err = vlan_dev_change_flags(dev,  					    args.vlan_qos ? 
args.u.flag : 0, @@ -587,7 +548,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)  	case SET_VLAN_NAME_TYPE_CMD:  		err = -EPERM; -		if (!capable(CAP_NET_ADMIN)) +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))  			break;  		if ((args.u.name_type >= 0) &&  		    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { @@ -603,14 +564,14 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)  	case ADD_VLAN_CMD:  		err = -EPERM; -		if (!capable(CAP_NET_ADMIN)) +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))  			break;  		err = register_vlan_device(dev, args.u.VID);  		break;  	case DEL_VLAN_CMD:  		err = -EPERM; -		if (!capable(CAP_NET_ADMIN)) +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))  			break;  		unregister_vlan_dev(dev, NULL);  		err = 0; @@ -669,8 +630,7 @@ static int __init vlan_proto_init(void)  {  	int err; -	pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright); -	pr_info("All bugs added by %s\n", vlan_buggyright); +	pr_info("%s v%s\n", vlan_fullname, vlan_version);  	err = register_pernet_subsys(&vlan_net_ops);  	if (err < 0) @@ -684,14 +644,19 @@ static int __init vlan_proto_init(void)  	if (err < 0)  		goto err3; -	err = vlan_netlink_init(); +	err = vlan_mvrp_init();  	if (err < 0)  		goto err4; -	dev_add_pack(&vlan_packet_type); +	err = vlan_netlink_init(); +	if (err < 0) +		goto err5; +  	vlan_ioctl_set(vlan_ioctl_handler);  	return 0; +err5: +	vlan_mvrp_uninit();  err4:  	vlan_gvrp_uninit();  err3: @@ -709,11 +674,10 @@ static void __exit vlan_cleanup_module(void)  	unregister_netdevice_notifier(&vlan_notifier_block); -	dev_remove_pack(&vlan_packet_type); -  	unregister_pernet_subsys(&vlan_net_ops);  	rcu_barrier(); /* Wait for completion of call_rcu()'s */ +	vlan_mvrp_uninit();  	vlan_gvrp_uninit();  } diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 5687c9b95f3..9d010a09ab9 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -3,80 +3,100 @@  #include <linux/if_vlan.h>  #include <linux/u64_stats_sync.h> +#include <linux/list.h> - -/** - *	struct vlan_priority_tci_mapping - vlan egress priority mappings - *	@priority: skb priority - *	@vlan_qos: vlan priority: (skb->priority << 13) & 0xE000 - *	@next: pointer to next struct +/* if this changes, algorithm will have to be reworked because this + * depends on completely exhausting the VLAN identifier space.  Thus + * it gives constant time look-up, but in many cases it wastes memory.   
*/ -struct vlan_priority_tci_mapping { -	u32					priority; -	u16					vlan_qos; -	struct vlan_priority_tci_mapping	*next; +#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8 +#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) + +enum vlan_protos { +	VLAN_PROTO_8021Q	= 0, +	VLAN_PROTO_8021AD, +	VLAN_PROTO_NUM,  }; +struct vlan_group { +	unsigned int		nr_vlan_devs; +	struct hlist_node	hlist;	/* linked list */ +	struct net_device **vlan_devices_arrays[VLAN_PROTO_NUM] +					       [VLAN_GROUP_ARRAY_SPLIT_PARTS]; +}; -/** - *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats - *	@rx_packets: number of received packets - *	@rx_bytes: number of received bytes - *	@rx_multicast: number of received multicast packets - *	@tx_packets: number of transmitted packets - *	@tx_bytes: number of transmitted bytes - *	@syncp: synchronization point for 64bit counters - *	@rx_errors: number of rx errors - *	@tx_dropped: number of tx drops - */ -struct vlan_pcpu_stats { -	u64			rx_packets; -	u64			rx_bytes; -	u64			rx_multicast; -	u64			tx_packets; -	u64			tx_bytes; -	struct u64_stats_sync	syncp; -	u32			rx_errors; -	u32			tx_dropped; +struct vlan_info { +	struct net_device	*real_dev; /* The ethernet(like) device +					    * the vlan is attached to. +					    */ +	struct vlan_group	grp; +	struct list_head	vid_list; +	unsigned int		nr_vids; +	struct rcu_head		rcu;  }; -/** - *	struct vlan_dev_info - VLAN private device data - *	@nr_ingress_mappings: number of ingress priority mappings - *	@ingress_priority_map: ingress priority mappings - *	@nr_egress_mappings: number of egress priority mappings - *	@egress_priority_map: hash of egress priority mappings - *	@vlan_id: VLAN identifier - *	@flags: device flags - *	@real_dev: underlying netdevice - *	@real_dev_addr: address of underlying netdevice - *	@dent: proc dir entry - *	@vlan_pcpu_stats: ptr to percpu rx stats - */ -struct vlan_dev_info { -	unsigned int				nr_ingress_mappings; -	u32					ingress_priority_map[8]; -	unsigned int				nr_egress_mappings; -	struct vlan_priority_tci_mapping	*egress_priority_map[16]; +static inline unsigned int vlan_proto_idx(__be16 proto) +{ +	switch (proto) { +	case htons(ETH_P_8021Q): +		return VLAN_PROTO_8021Q; +	case htons(ETH_P_8021AD): +		return VLAN_PROTO_8021AD; +	default: +		BUG(); +		return 0; +	} +} -	u16					vlan_id; -	u16					flags; +static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg, +							 unsigned int pidx, +							 u16 vlan_id) +{ +	struct net_device **array; -	struct net_device			*real_dev; -	unsigned char				real_dev_addr[ETH_ALEN]; +	array = vg->vlan_devices_arrays[pidx] +				       [vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; +	return array ? 
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL; +} -	struct proc_dir_entry			*dent; -	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats; -}; +static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, +						       __be16 vlan_proto, +						       u16 vlan_id) +{ +	return __vlan_group_get_device(vg, vlan_proto_idx(vlan_proto), vlan_id); +} -static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) +static inline void vlan_group_set_device(struct vlan_group *vg, +					 __be16 vlan_proto, u16 vlan_id, +					 struct net_device *dev)  { -	return netdev_priv(dev); +	struct net_device **array; +	if (!vg) +		return; +	array = vg->vlan_devices_arrays[vlan_proto_idx(vlan_proto)] +				       [vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; +	array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;  } +/* Must be invoked with rcu_read_lock or with RTNL. */ +static inline struct net_device *vlan_find_dev(struct net_device *real_dev, +					       __be16 vlan_proto, u16 vlan_id) +{ +	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info); + +	if (vlan_info) +		return vlan_group_get_device(&vlan_info->grp, +					     vlan_proto, vlan_id); + +	return NULL; +} + +#define vlan_group_for_each_dev(grp, i, dev) \ +	for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \ +		if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \ +							    (i) % VLAN_N_VID))) +  /* found in vlan_dev.c */ -int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, -		  struct packet_type *ptype, struct net_device *orig_dev);  void vlan_dev_set_ingress_priority(const struct net_device *dev,  				   u32 skb_prio, u16 vlan_prio);  int vlan_dev_set_egress_priority(const struct net_device *dev, @@ -84,7 +104,8 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,  int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);  void vlan_dev_get_realdev_name(const struct net_device *dev, char *result); -int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id); +int vlan_check_real_dev(struct net_device *real_dev, +			__be16 protocol, u16 vlan_id);  void vlan_setup(struct net_device *dev);  int register_vlan_dev(struct net_device *dev);  void unregister_vlan_dev(struct net_device *dev, struct list_head *head); @@ -92,18 +113,18 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head);  static inline u32 vlan_get_ingress_priority(struct net_device *dev,  					    u16 vlan_tci)  { -	struct vlan_dev_info *vip = vlan_dev_info(dev); +	struct vlan_dev_priv *vip = vlan_dev_priv(dev);  	return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];  }  #ifdef CONFIG_VLAN_8021Q_GVRP -extern int vlan_gvrp_request_join(const struct net_device *dev); -extern void vlan_gvrp_request_leave(const struct net_device *dev); -extern int vlan_gvrp_init_applicant(struct net_device *dev); -extern void vlan_gvrp_uninit_applicant(struct net_device *dev); -extern int vlan_gvrp_init(void); -extern void vlan_gvrp_uninit(void); +int vlan_gvrp_request_join(const struct net_device *dev); +void vlan_gvrp_request_leave(const struct net_device *dev); +int vlan_gvrp_init_applicant(struct net_device *dev); +void vlan_gvrp_uninit_applicant(struct net_device *dev); +int vlan_gvrp_init(void); +void vlan_gvrp_uninit(void);  #else  static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }  static inline void vlan_gvrp_request_leave(const struct net_device *dev) {} @@ -113,18 +134,29 @@ static inline int vlan_gvrp_init(void) { return 
0; }  static inline void vlan_gvrp_uninit(void) {}  #endif +#ifdef CONFIG_VLAN_8021Q_MVRP +int vlan_mvrp_request_join(const struct net_device *dev); +void vlan_mvrp_request_leave(const struct net_device *dev); +int vlan_mvrp_init_applicant(struct net_device *dev); +void vlan_mvrp_uninit_applicant(struct net_device *dev); +int vlan_mvrp_init(void); +void vlan_mvrp_uninit(void); +#else +static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; } +static inline void vlan_mvrp_request_leave(const struct net_device *dev) {} +static inline int vlan_mvrp_init_applicant(struct net_device *dev) { return 0; } +static inline void vlan_mvrp_uninit_applicant(struct net_device *dev) {} +static inline int vlan_mvrp_init(void) { return 0; } +static inline void vlan_mvrp_uninit(void) {} +#endif +  extern const char vlan_fullname[];  extern const char vlan_version[]; -extern int vlan_netlink_init(void); -extern void vlan_netlink_fini(void); +int vlan_netlink_init(void); +void vlan_netlink_fini(void);  extern struct rtnl_link_ops vlan_link_ops; -static inline int is_vlan_dev(struct net_device *dev) -{ -	return dev->priv_flags & IFF_802_1Q_VLAN; -} -  extern int vlan_net_id;  struct proc_dir_entry; diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index ce8e3ab3e7a..75d42776399 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -2,89 +2,414 @@  #include <linux/netdevice.h>  #include <linux/if_vlan.h>  #include <linux/netpoll.h> +#include <linux/export.h>  #include "vlan.h" -bool vlan_hwaccel_do_receive(struct sk_buff **skbp) +bool vlan_do_receive(struct sk_buff **skbp)  {  	struct sk_buff *skb = *skbp; -	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK; +	__be16 vlan_proto = skb->vlan_proto; +	u16 vlan_id = vlan_tx_tag_get_id(skb);  	struct net_device *vlan_dev;  	struct vlan_pcpu_stats *rx_stats; -	vlan_dev = vlan_find_dev(skb->dev, vlan_id); -	if (!vlan_dev) { -		if (vlan_id) -			skb->pkt_type = PACKET_OTHERHOST; +	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id); +	if (!vlan_dev)  		return false; -	}  	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);  	if (unlikely(!skb))  		return false;  	skb->dev = vlan_dev; +	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { +		/* Our lower layer thinks this is not local, let's make sure. +		 * This allows the VLAN to have a different MAC than the +		 * underlying device, and still route correctly. */ +		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr)) +			skb->pkt_type = PACKET_HOST; +	} + +	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) { +		unsigned int offset = skb->data - skb_mac_header(skb); + +		/* +		 * vlan_insert_tag expect skb->data pointing to mac header. 
+		 * So change skb->data before calling it and change back to +		 * original position later +		 */ +		skb_push(skb, offset); +		skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto, +					      skb->vlan_tci); +		if (!skb) +			return false; +		skb_pull(skb, offset + VLAN_HLEN); +		skb_reset_mac_len(skb); +	} +  	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);  	skb->vlan_tci = 0; -	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats); +	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);  	u64_stats_update_begin(&rx_stats->syncp);  	rx_stats->rx_packets++;  	rx_stats->rx_bytes += skb->len; - -	switch (skb->pkt_type) { -	case PACKET_BROADCAST: -		break; -	case PACKET_MULTICAST: +	if (skb->pkt_type == PACKET_MULTICAST)  		rx_stats->rx_multicast++; -		break; -	case PACKET_OTHERHOST: -		/* Our lower layer thinks this is not local, let's make sure. -		 * This allows the VLAN to have a different MAC than the -		 * underlying device, and still route correctly. */ -		if (!compare_ether_addr(eth_hdr(skb)->h_dest, -					vlan_dev->dev_addr)) -			skb->pkt_type = PACKET_HOST; -		break; -	}  	u64_stats_update_end(&rx_stats->syncp);  	return true;  } +/* Must be invoked with rcu_read_lock. */ +struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev, +					__be16 vlan_proto, u16 vlan_id) +{ +	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info); + +	if (vlan_info) { +		return vlan_group_get_device(&vlan_info->grp, +					     vlan_proto, vlan_id); +	} else { +		/* +		 * Lower devices of master uppers (bonding, team) do not have +		 * grp assigned to themselves. Grp is assigned to upper device +		 * instead. +		 */ +		struct net_device *upper_dev; + +		upper_dev = netdev_master_upper_dev_get_rcu(dev); +		if (upper_dev) +			return __vlan_find_dev_deep_rcu(upper_dev, +						    vlan_proto, vlan_id); +	} + +	return NULL; +} +EXPORT_SYMBOL(__vlan_find_dev_deep_rcu); +  struct net_device *vlan_dev_real_dev(const struct net_device *dev)  { -	return vlan_dev_info(dev)->real_dev; +	struct net_device *ret = vlan_dev_priv(dev)->real_dev; + +	while (is_vlan_dev(ret)) +		ret = vlan_dev_priv(ret)->real_dev; + +	return ret;  }  EXPORT_SYMBOL(vlan_dev_real_dev);  u16 vlan_dev_vlan_id(const struct net_device *dev)  { -	return vlan_dev_info(dev)->vlan_id; +	return vlan_dev_priv(dev)->vlan_id;  }  EXPORT_SYMBOL(vlan_dev_vlan_id); -/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). 
*/ -int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, -		      u16 vlan_tci, int polling) +__be16 vlan_dev_vlan_proto(const struct net_device *dev) +{ +	return vlan_dev_priv(dev)->vlan_proto; +} +EXPORT_SYMBOL(vlan_dev_vlan_proto); + +static struct sk_buff *vlan_reorder_header(struct sk_buff *skb) +{ +	if (skb_cow(skb, skb_headroom(skb)) < 0) { +		kfree_skb(skb); +		return NULL; +	} + +	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); +	skb->mac_header += VLAN_HLEN; +	return skb; +} + +struct sk_buff *vlan_untag(struct sk_buff *skb) +{ +	struct vlan_hdr *vhdr; +	u16 vlan_tci; + +	if (unlikely(vlan_tx_tag_present(skb))) { +		/* vlan_tci is already set-up so leave this for another time */ +		return skb; +	} + +	skb = skb_share_check(skb, GFP_ATOMIC); +	if (unlikely(!skb)) +		goto err_free; + +	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) +		goto err_free; + +	vhdr = (struct vlan_hdr *) skb->data; +	vlan_tci = ntohs(vhdr->h_vlan_TCI); +	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); + +	skb_pull_rcsum(skb, VLAN_HLEN); +	vlan_set_encap_proto(skb, vhdr); + +	skb = vlan_reorder_header(skb); +	if (unlikely(!skb)) +		goto err_free; + +	skb_reset_network_header(skb); +	skb_reset_transport_header(skb); +	skb_reset_mac_len(skb); + +	return skb; + +err_free: +	kfree_skb(skb); +	return NULL; +} +EXPORT_SYMBOL(vlan_untag); + + +/* + * vlan info and vid list + */ + +static void vlan_group_free(struct vlan_group *grp) +{ +	int i, j; + +	for (i = 0; i < VLAN_PROTO_NUM; i++) +		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++) +			kfree(grp->vlan_devices_arrays[i][j]); +} + +static void vlan_info_free(struct vlan_info *vlan_info) +{ +	vlan_group_free(&vlan_info->grp); +	kfree(vlan_info); +} + +static void vlan_info_rcu_free(struct rcu_head *rcu) +{ +	vlan_info_free(container_of(rcu, struct vlan_info, rcu)); +} + +static struct vlan_info *vlan_info_alloc(struct net_device *dev)  { -	__vlan_hwaccel_put_tag(skb, vlan_tci); -	return polling ? 
netif_receive_skb(skb) : netif_rx(skb); +	struct vlan_info *vlan_info; + +	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL); +	if (!vlan_info) +		return NULL; + +	vlan_info->real_dev = dev; +	INIT_LIST_HEAD(&vlan_info->vid_list); +	return vlan_info; +} + +struct vlan_vid_info { +	struct list_head list; +	__be16 proto; +	u16 vid; +	int refcount; +}; + +static bool vlan_hw_filter_capable(const struct net_device *dev, +				     const struct vlan_vid_info *vid_info) +{ +	if (vid_info->proto == htons(ETH_P_8021Q) && +	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) +		return true; +	if (vid_info->proto == htons(ETH_P_8021AD) && +	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER) +		return true; +	return false; +} + +static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info, +					       __be16 proto, u16 vid) +{ +	struct vlan_vid_info *vid_info; + +	list_for_each_entry(vid_info, &vlan_info->vid_list, list) { +		if (vid_info->proto == proto && vid_info->vid == vid) +			return vid_info; +	} +	return NULL; +} + +static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid) +{ +	struct vlan_vid_info *vid_info; + +	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL); +	if (!vid_info) +		return NULL; +	vid_info->proto = proto; +	vid_info->vid = vid; + +	return vid_info; +} + +static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid, +			  struct vlan_vid_info **pvid_info) +{ +	struct net_device *dev = vlan_info->real_dev; +	const struct net_device_ops *ops = dev->netdev_ops; +	struct vlan_vid_info *vid_info; +	int err; + +	vid_info = vlan_vid_info_alloc(proto, vid); +	if (!vid_info) +		return -ENOMEM; + +	if (vlan_hw_filter_capable(dev, vid_info)) { +		err =  ops->ndo_vlan_rx_add_vid(dev, proto, vid); +		if (err) { +			kfree(vid_info); +			return err; +		} +	} +	list_add(&vid_info->list, &vlan_info->vid_list); +	vlan_info->nr_vids++; +	*pvid_info = vid_info; +	return 0; +} + +int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) +{ +	struct vlan_info *vlan_info; +	struct vlan_vid_info *vid_info; +	bool vlan_info_created = false; +	int err; + +	ASSERT_RTNL(); + +	vlan_info = rtnl_dereference(dev->vlan_info); +	if (!vlan_info) { +		vlan_info = vlan_info_alloc(dev); +		if (!vlan_info) +			return -ENOMEM; +		vlan_info_created = true; +	} +	vid_info = vlan_vid_info_get(vlan_info, proto, vid); +	if (!vid_info) { +		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info); +		if (err) +			goto out_free_vlan_info; +	} +	vid_info->refcount++; + +	if (vlan_info_created) +		rcu_assign_pointer(dev->vlan_info, vlan_info); + +	return 0; + +out_free_vlan_info: +	if (vlan_info_created) +		kfree(vlan_info); +	return err; +} +EXPORT_SYMBOL(vlan_vid_add); + +static void __vlan_vid_del(struct vlan_info *vlan_info, +			   struct vlan_vid_info *vid_info) +{ +	struct net_device *dev = vlan_info->real_dev; +	const struct net_device_ops *ops = dev->netdev_ops; +	__be16 proto = vid_info->proto; +	u16 vid = vid_info->vid; +	int err; + +	if (vlan_hw_filter_capable(dev, vid_info)) { +		err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid); +		if (err) { +			pr_warn("failed to kill vid %04x/%d for device %s\n", +				proto, vid, dev->name); +		} +	} +	list_del(&vid_info->list); +	kfree(vid_info); +	vlan_info->nr_vids--; +} + +void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid) +{ +	struct vlan_info *vlan_info; +	struct vlan_vid_info *vid_info; + +	ASSERT_RTNL(); + +	vlan_info = rtnl_dereference(dev->vlan_info); +	if (!vlan_info) +		return; + +	
vid_info = vlan_vid_info_get(vlan_info, proto, vid); +	if (!vid_info) +		return; +	vid_info->refcount--; +	if (vid_info->refcount == 0) { +		__vlan_vid_del(vlan_info, vid_info); +		if (vlan_info->nr_vids == 0) { +			RCU_INIT_POINTER(dev->vlan_info, NULL); +			call_rcu(&vlan_info->rcu, vlan_info_rcu_free); +		} +	} +} +EXPORT_SYMBOL(vlan_vid_del); + +int vlan_vids_add_by_dev(struct net_device *dev, +			 const struct net_device *by_dev) +{ +	struct vlan_vid_info *vid_info; +	struct vlan_info *vlan_info; +	int err; + +	ASSERT_RTNL(); + +	vlan_info = rtnl_dereference(by_dev->vlan_info); +	if (!vlan_info) +		return 0; + +	list_for_each_entry(vid_info, &vlan_info->vid_list, list) { +		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid); +		if (err) +			goto unwind; +	} +	return 0; + +unwind: +	list_for_each_entry_continue_reverse(vid_info, +					     &vlan_info->vid_list, +					     list) { +		vlan_vid_del(dev, vid_info->proto, vid_info->vid); +	} + +	return err;  } -EXPORT_SYMBOL(__vlan_hwaccel_rx); +EXPORT_SYMBOL(vlan_vids_add_by_dev); -gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, -			      unsigned int vlan_tci, struct sk_buff *skb) +void vlan_vids_del_by_dev(struct net_device *dev, +			  const struct net_device *by_dev)  { -	__vlan_hwaccel_put_tag(skb, vlan_tci); -	return napi_gro_receive(napi, skb); +	struct vlan_vid_info *vid_info; +	struct vlan_info *vlan_info; + +	ASSERT_RTNL(); + +	vlan_info = rtnl_dereference(by_dev->vlan_info); +	if (!vlan_info) +		return; + +	list_for_each_entry(vid_info, &vlan_info->vid_list, list) +		vlan_vid_del(dev, vid_info->proto, vid_info->vid);  } -EXPORT_SYMBOL(vlan_gro_receive); +EXPORT_SYMBOL(vlan_vids_del_by_dev); -gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, -			    unsigned int vlan_tci) +bool vlan_uses_dev(const struct net_device *dev)  { -	__vlan_hwaccel_put_tag(napi->skb, vlan_tci); -	return napi_gro_frags(napi); +	struct vlan_info *vlan_info; + +	ASSERT_RTNL(); + +	vlan_info = rtnl_dereference(dev->vlan_info); +	if (!vlan_info) +		return false; +	return vlan_info->grp.nr_vlan_devs ? true : false;  } -EXPORT_SYMBOL(vlan_gro_frags); +EXPORT_SYMBOL(vlan_uses_dev); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index be737539f34..dd11f612e03 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -20,6 +20,8 @@   *		2 of the License, or (at your option) any later version.   */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +  #include <linux/module.h>  #include <linux/slab.h>  #include <linux/skbuff.h> @@ -31,6 +33,7 @@  #include "vlan.h"  #include "vlanproc.h"  #include <linux/if_vlan.h> +#include <linux/netpoll.h>  /*   *	Rebuild the Ethernet MAC header. This is called after an ARP @@ -55,206 +58,16 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)  		return arp_find(veth->h_dest, skb);  #endif  	default: -		pr_debug("%s: unable to resolve type %X addresses.\n", +		pr_debug("%s: unable to resolve type %X addresses\n",  			 dev->name, ntohs(veth->h_vlan_encapsulated_proto)); -		memcpy(veth->h_source, dev->dev_addr, ETH_ALEN); +		ether_addr_copy(veth->h_source, dev->dev_addr);  		break;  	}  	return 0;  } -static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb) -{ -	if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) { -		if (skb_cow(skb, skb_headroom(skb)) < 0) -			skb = NULL; -		if (skb) { -			/* Lifted from Gleb's VLAN code... 
*/ -			memmove(skb->data - ETH_HLEN, -				skb->data - VLAN_ETH_HLEN, 12); -			skb->mac_header += VLAN_HLEN; -		} -	} - -	return skb; -} - -static inline void vlan_set_encap_proto(struct sk_buff *skb, -		struct vlan_hdr *vhdr) -{ -	__be16 proto; -	unsigned char *rawp; - -	/* -	 * Was a VLAN packet, grab the encapsulated protocol, which the layer -	 * three protocols care about. -	 */ - -	proto = vhdr->h_vlan_encapsulated_proto; -	if (ntohs(proto) >= 1536) { -		skb->protocol = proto; -		return; -	} - -	rawp = skb->data; -	if (*(unsigned short *)rawp == 0xFFFF) -		/* -		 * This is a magic hack to spot IPX packets. Older Novell -		 * breaks the protocol design and runs IPX over 802.3 without -		 * an 802.2 LLC layer. We look for FFFF which isn't a used -		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware -		 * but does for the rest. -		 */ -		skb->protocol = htons(ETH_P_802_3); -	else -		/* -		 * Real 802.2 LLC -		 */ -		skb->protocol = htons(ETH_P_802_2); -} - -/* - *	Determine the packet's protocol ID. The rule here is that we - *	assume 802.3 if the type field is short enough to be a length. - *	This is normal practice and works for any 'now in use' protocol. - * - *  Also, at this point we assume that we ARE dealing exclusively with - *  VLAN packets, or packets that should be made into VLAN packets based - *  on a default VLAN ID. - * - *  NOTE:  Should be similar to ethernet/eth.c. - * - *  SANITY NOTE:  This method is called when a packet is moving up the stack - *                towards userland.  To get here, it would have already passed - *                through the ethernet/eth.c eth_type_trans() method. - *  SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be - *                 stored UNALIGNED in the memory.  RISC systems don't like - *                 such cases very much... - *  SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be - *  		    aligned, so there doesn't need to be any of the unaligned - *  		    stuff.  It has been commented out now...  --Ben - * - */ -int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, -		  struct packet_type *ptype, struct net_device *orig_dev) -{ -	struct vlan_hdr *vhdr; -	struct vlan_pcpu_stats *rx_stats; -	struct net_device *vlan_dev; -	u16 vlan_id; -	u16 vlan_tci; - -	skb = skb_share_check(skb, GFP_ATOMIC); -	if (skb == NULL) -		goto err_free; - -	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) -		goto err_free; - -	vhdr = (struct vlan_hdr *)skb->data; -	vlan_tci = ntohs(vhdr->h_vlan_TCI); -	vlan_id = vlan_tci & VLAN_VID_MASK; - -	rcu_read_lock(); -	vlan_dev = vlan_find_dev(dev, vlan_id); - -	/* If the VLAN device is defined, we use it. -	 * If not, and the VID is 0, it is a 802.1p packet (not -	 * really a VLAN), so we will just netif_rx it later to the -	 * original interface, but with the skb->proto set to the -	 * wrapped proto: we do nothing here. 
-	 */ - -	if (!vlan_dev) { -		if (vlan_id) { -			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n", -				 __func__, vlan_id, dev->name); -			goto err_unlock; -		} -		rx_stats = NULL; -	} else { -		skb->dev = vlan_dev; - -		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats); - -		u64_stats_update_begin(&rx_stats->syncp); -		rx_stats->rx_packets++; -		rx_stats->rx_bytes += skb->len; - -		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci); - -		pr_debug("%s: priority: %u for TCI: %hu\n", -			 __func__, skb->priority, vlan_tci); - -		switch (skb->pkt_type) { -		case PACKET_BROADCAST: -			/* Yeah, stats collect these together.. */ -			/* stats->broadcast ++; // no such counter :-( */ -			break; - -		case PACKET_MULTICAST: -			rx_stats->rx_multicast++; -			break; - -		case PACKET_OTHERHOST: -			/* Our lower layer thinks this is not local, let's make -			 * sure. -			 * This allows the VLAN to have a different MAC than the -			 * underlying device, and still route correctly. -			 */ -			if (!compare_ether_addr(eth_hdr(skb)->h_dest, -						skb->dev->dev_addr)) -				skb->pkt_type = PACKET_HOST; -			break; -		default: -			break; -		} -		u64_stats_update_end(&rx_stats->syncp); -	} - -	skb_pull_rcsum(skb, VLAN_HLEN); -	vlan_set_encap_proto(skb, vhdr); - -	if (vlan_dev) { -		skb = vlan_check_reorder_header(skb); -		if (!skb) { -			rx_stats->rx_errors++; -			goto err_unlock; -		} -	} - -	netif_rx(skb); - -	rcu_read_unlock(); -	return NET_RX_SUCCESS; - -err_unlock: -	rcu_read_unlock(); -err_free: -	atomic_long_inc(&dev->rx_dropped); -	kfree_skb(skb); -	return NET_RX_DROP; -} - -static inline u16 -vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb) -{ -	struct vlan_priority_tci_mapping *mp; - -	mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)]; -	while (mp) { -		if (mp->priority == skb->priority) { -			return mp->vlan_qos; /* This should already be shifted -					      * to mask correctly with the -					      * VLAN's TCI */ -		} -		mp = mp->next; -	} -	return 0; -} -  /*   *	Create the VLAN header for an arbitrary protocol layer   * @@ -269,16 +82,17 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,  				const void *daddr, const void *saddr,  				unsigned int len)  { +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct vlan_hdr *vhdr;  	unsigned int vhdrlen = 0;  	u16 vlan_tci = 0;  	int rc; -	if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) { +	if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {  		vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN); -		vlan_tci = vlan_dev_info(dev)->vlan_id; -		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); +		vlan_tci = vlan->vlan_id; +		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);  		vhdr->h_vlan_TCI = htons(vlan_tci);  		/* @@ -290,8 +104,8 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,  		else  			vhdr->h_vlan_encapsulated_proto = htons(len); -		skb->protocol = htons(ETH_P_8021Q); -		type = ETH_P_8021Q; +		skb->protocol = vlan->vlan_proto; +		type = ntohs(vlan->vlan_proto);  		vhdrlen = VLAN_HLEN;  	} @@ -300,16 +114,28 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,  		saddr = dev->dev_addr;  	/* Now make the underlying real hard header */ -	dev = vlan_dev_info(dev)->real_dev; +	dev = vlan->real_dev;  	rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);  	if (rc > 0)  		rc += vhdrlen;  	return rc;  } +static inline netdev_tx_t 
vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb) +{ +#ifdef CONFIG_NET_POLL_CONTROLLER +	if (vlan->netpoll) +		netpoll_send_skb(vlan->netpoll, skb); +#else +	BUG(); +#endif +	return NETDEV_TX_OK; +} +  static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,  					    struct net_device *dev)  { +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);  	unsigned int len;  	int ret; @@ -319,28 +145,31 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,  	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING  	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...  	 */ -	if (veth->h_vlan_proto != htons(ETH_P_8021Q) || -	    vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) { +	if (veth->h_vlan_proto != vlan->vlan_proto || +	    vlan->flags & VLAN_FLAG_REORDER_HDR) {  		u16 vlan_tci; -		vlan_tci = vlan_dev_info(dev)->vlan_id; -		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); -		skb = __vlan_hwaccel_put_tag(skb, vlan_tci); +		vlan_tci = vlan->vlan_id; +		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority); +		skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);  	} -	skb_set_dev(skb, vlan_dev_info(dev)->real_dev); +	skb->dev = vlan->real_dev;  	len = skb->len; +	if (unlikely(netpoll_tx_running(dev))) +		return vlan_netpoll_send_skb(vlan, skb); +  	ret = dev_queue_xmit(skb);  	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {  		struct vlan_pcpu_stats *stats; -		stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats); +		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);  		u64_stats_update_begin(&stats->syncp);  		stats->tx_packets++;  		stats->tx_bytes += len; -		u64_stats_update_begin(&stats->syncp); +		u64_stats_update_end(&stats->syncp);  	} else { -		this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped); +		this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);  	}  	return ret; @@ -351,7 +180,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)  	/* TODO: gotta make sure the underlying layer can handle it,  	 * maybe an IFF_VLAN_CAPABLE flag for devices?  	 */ -	if (vlan_dev_info(dev)->real_dev->mtu < new_mtu) +	if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)  		return -ERANGE;  	dev->mtu = new_mtu; @@ -362,7 +191,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)  void vlan_dev_set_ingress_priority(const struct net_device *dev,  				   u32 skb_prio, u16 vlan_prio)  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)  		vlan->nr_ingress_mappings--; @@ -375,7 +204,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,  int vlan_dev_set_egress_priority(const struct net_device *dev,  				 u32 skb_prio, u16 vlan_prio)  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct vlan_priority_tci_mapping *mp = NULL;  	struct vlan_priority_tci_mapping *np;  	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; @@ -403,6 +232,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,  	np->next = mp;  	np->priority = skb_prio;  	np->vlan_qos = vlan_qos; +	/* Before inserting this element in hash table, make sure all its fields +	 * are committed to memory. 
+	 * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask() +	 */ +	smp_wmb();  	vlan->egress_priority_map[skb_prio & 0xF] = np;  	if (vlan_qos)  		vlan->nr_egress_mappings++; @@ -412,11 +246,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,  /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */  int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	u32 old_flags = vlan->flags;  	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP | -		     VLAN_FLAG_LOOSE_BINDING)) +		     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))  		return -EINVAL;  	vlan->flags = (old_flags & ~mask) | (flags & mask); @@ -427,17 +261,24 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)  		else  			vlan_gvrp_request_leave(dev);  	} + +	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) { +		if (vlan->flags & VLAN_FLAG_MVRP) +			vlan_mvrp_request_join(dev); +		else +			vlan_mvrp_request_leave(dev); +	}  	return 0;  }  void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)  { -	strncpy(result, vlan_dev_info(dev)->real_dev->name, 23); +	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);  }  static int vlan_dev_open(struct net_device *dev)  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct net_device *real_dev = vlan->real_dev;  	int err; @@ -445,7 +286,7 @@ static int vlan_dev_open(struct net_device *dev)  	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))  		return -ENETDOWN; -	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { +	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {  		err = dev_uc_add(real_dev, dev->dev_addr);  		if (err < 0)  			goto out; @@ -462,11 +303,14 @@ static int vlan_dev_open(struct net_device *dev)  			goto clear_allmulti;  	} -	memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN); +	ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);  	if (vlan->flags & VLAN_FLAG_GVRP)  		vlan_gvrp_request_join(dev); +	if (vlan->flags & VLAN_FLAG_MVRP) +		vlan_mvrp_request_join(dev); +  	if (netif_carrier_ok(real_dev))  		netif_carrier_on(dev);  	return 0; @@ -475,7 +319,7 @@ clear_allmulti:  	if (dev->flags & IFF_ALLMULTI)  		dev_set_allmulti(real_dev, -1);  del_unicast: -	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) +	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))  		dev_uc_del(real_dev, dev->dev_addr);  out:  	netif_carrier_off(dev); @@ -484,12 +328,9 @@ out:  static int vlan_dev_stop(struct net_device *dev)  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct net_device *real_dev = vlan->real_dev; -	if (vlan->flags & VLAN_FLAG_GVRP) -		vlan_gvrp_request_leave(dev); -  	dev_mc_unsync(real_dev, dev);  	dev_uc_unsync(real_dev, dev);  	if (dev->flags & IFF_ALLMULTI) @@ -497,7 +338,7 @@ static int vlan_dev_stop(struct net_device *dev)  	if (dev->flags & IFF_PROMISC)  		dev_set_promiscuity(real_dev, -1); -	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) +	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))  		dev_uc_del(real_dev, dev->dev_addr);  	netif_carrier_off(dev); @@ -506,7 +347,7 @@ static int vlan_dev_stop(struct net_device *dev)  static int vlan_dev_set_mac_address(struct net_device *dev, void *p)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct 
net_device *real_dev = vlan_dev_priv(dev)->real_dev;  	struct sockaddr *addr = p;  	int err; @@ -516,23 +357,23 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)  	if (!(dev->flags & IFF_UP))  		goto out; -	if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { +	if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {  		err = dev_uc_add(real_dev, addr->sa_data);  		if (err < 0)  			return err;  	} -	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) +	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))  		dev_uc_del(real_dev, dev->dev_addr);  out: -	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); +	ether_addr_copy(dev->dev_addr, addr->sa_data);  	return 0;  }  static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;  	const struct net_device_ops *ops = real_dev->netdev_ops;  	struct ifreq ifrr;  	int err = -EOPNOTSUPP; @@ -557,7 +398,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)  static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;  	const struct net_device_ops *ops = real_dev->netdev_ops;  	int err = 0; @@ -567,11 +408,11 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)  	return err;  } -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#if IS_ENABLED(CONFIG_FCOE)  static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,  				   struct scatterlist *sgl, unsigned int sgc)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;  	const struct net_device_ops *ops = real_dev->netdev_ops;  	int rc = 0; @@ -583,7 +424,7 @@ static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,  static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;  	const struct net_device_ops *ops = real_dev->netdev_ops;  	int len = 0; @@ -595,7 +436,7 @@ static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)  static int vlan_dev_fcoe_enable(struct net_device *dev)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;  	const struct net_device_ops *ops = real_dev->netdev_ops;  	int rc = -EINVAL; @@ -606,7 +447,7 @@ static int vlan_dev_fcoe_enable(struct net_device *dev)  static int vlan_dev_fcoe_disable(struct net_device *dev)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;  	const struct net_device_ops *ops = real_dev->netdev_ops;  	int rc = -EINVAL; @@ -617,7 +458,7 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)  static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;  	const struct net_device_ops *ops = real_dev->netdev_ops;  	int rc = -EINVAL; @@ -625,22 +466,37 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)  		rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);  	return rc;  } + +static int vlan_dev_fcoe_ddp_target(struct 
net_device *dev, u16 xid, +				    struct scatterlist *sgl, unsigned int sgc) +{ +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; +	const struct net_device_ops *ops = real_dev->netdev_ops; +	int rc = 0; + +	if (ops->ndo_fcoe_ddp_target) +		rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc); + +	return rc; +}  #endif  static void vlan_dev_change_rx_flags(struct net_device *dev, int change)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; -	if (change & IFF_ALLMULTI) -		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); -	if (change & IFF_PROMISC) -		dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); +	if (dev->flags & IFF_UP) { +		if (change & IFF_ALLMULTI) +			dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); +		if (change & IFF_PROMISC) +			dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); +	}  }  static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)  { -	dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); -	dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); +	dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); +	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);  }  /* @@ -668,18 +524,46 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)  	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);  } +static int vlan_dev_get_lock_subclass(struct net_device *dev) +{ +	return vlan_dev_priv(dev)->nest_level; +} +  static const struct header_ops vlan_header_ops = {  	.create	 = vlan_dev_hard_header,  	.rebuild = vlan_dev_rebuild_header,  	.parse	 = eth_header_parse,  }; +static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev, +				     unsigned short type, +				     const void *daddr, const void *saddr, +				     unsigned int len) +{ +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev); +	struct net_device *real_dev = vlan->real_dev; + +	if (saddr == NULL) +		saddr = dev->dev_addr; + +	return dev_hard_header(skb, real_dev, type, daddr, saddr, len); +} + +static const struct header_ops vlan_passthru_header_ops = { +	.create	 = vlan_passthru_hard_header, +	.rebuild = dev_rebuild_header, +	.parse	 = eth_header_parse, +}; + +static struct device_type vlan_type = { +	.name	= "vlan", +}; +  static const struct net_device_ops vlan_netdev_ops;  static int vlan_dev_init(struct net_device *dev)  { -	struct net_device *real_dev = vlan_dev_info(dev)->real_dev; -	int subclass = 0; +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;  	netif_carrier_off(dev); @@ -691,24 +575,33 @@ static int vlan_dev_init(struct net_device *dev)  					  (1<<__LINK_STATE_DORMANT))) |  		      (1<<__LINK_STATE_PRESENT); -	dev->features |= real_dev->features & real_dev->vlan_features; -	dev->features |= NETIF_F_LLTX; +	dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG | +			   NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | +			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM | +			   NETIF_F_ALL_FCOE; + +	dev->features |= real_dev->vlan_features | NETIF_F_LLTX;  	dev->gso_max_size = real_dev->gso_max_size; +	if (dev->features & NETIF_F_VLAN_FEATURES) +		netdev_warn(real_dev, "VLAN features are set incorrectly.  
Q-in-Q configurations may not work correctly.\n"); +  	/* ipv6 shared card related stuff */  	dev->dev_id = real_dev->dev_id;  	if (is_zero_ether_addr(dev->dev_addr)) -		memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len); +		eth_hw_addr_inherit(dev, real_dev);  	if (is_zero_ether_addr(dev->broadcast))  		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#if IS_ENABLED(CONFIG_FCOE)  	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;  #endif -	if (real_dev->features & NETIF_F_HW_VLAN_TX) { -		dev->header_ops      = real_dev->header_ops; +	dev->needed_headroom = real_dev->needed_headroom; +	if (vlan_hw_offload_capable(real_dev->features, +				    vlan_dev_priv(dev)->vlan_proto)) { +		dev->header_ops      = &vlan_passthru_header_ops;  		dev->hard_header_len = real_dev->hard_header_len;  	} else {  		dev->header_ops      = &vlan_header_ops; @@ -717,13 +610,12 @@ static int vlan_dev_init(struct net_device *dev)  	dev->netdev_ops = &vlan_netdev_ops; -	if (is_vlan_dev(real_dev)) -		subclass = 1; +	SET_NETDEV_DEVTYPE(dev, &vlan_type); -	vlan_dev_set_lockdep_class(dev, subclass); +	vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev)); -	vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats); -	if (!vlan_dev_info(dev)->vlan_pcpu_stats) +	vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); +	if (!vlan_dev_priv(dev)->vlan_pcpu_stats)  		return -ENOMEM;  	return 0; @@ -732,11 +624,9 @@ static int vlan_dev_init(struct net_device *dev)  static void vlan_dev_uninit(struct net_device *dev)  {  	struct vlan_priority_tci_mapping *pm; -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	int i; -	free_percpu(vlan->vlan_pcpu_stats); -	vlan->vlan_pcpu_stats = NULL;  	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {  		while ((pm = vlan->egress_priority_map[i]) != NULL) {  			vlan->egress_priority_map[i] = pm->next; @@ -745,96 +635,121 @@ static void vlan_dev_uninit(struct net_device *dev)  	}  } +static netdev_features_t vlan_dev_fix_features(struct net_device *dev, +	netdev_features_t features) +{ +	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; +	netdev_features_t old_features = features; + +	features = netdev_intersect_features(features, real_dev->vlan_features); +	features |= NETIF_F_RXCSUM; +	features = netdev_intersect_features(features, real_dev->features); + +	features |= old_features & NETIF_F_SOFT_FEATURES; +	features |= NETIF_F_LLTX; + +	return features; +} +  static int vlan_ethtool_get_settings(struct net_device *dev,  				     struct ethtool_cmd *cmd)  { -	const struct vlan_dev_info *vlan = vlan_dev_info(dev); -	return dev_ethtool_get_settings(vlan->real_dev, cmd); +	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + +	return __ethtool_get_settings(vlan->real_dev, cmd);  }  static void vlan_ethtool_get_drvinfo(struct net_device *dev,  				     struct ethtool_drvinfo *info)  { -	strcpy(info->driver, vlan_fullname); -	strcpy(info->version, vlan_version); -	strcpy(info->fw_version, "N/A"); +	strlcpy(info->driver, vlan_fullname, sizeof(info->driver)); +	strlcpy(info->version, vlan_version, sizeof(info->version)); +	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));  } -static u32 vlan_ethtool_get_rx_csum(struct net_device *dev) +static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)  { -	const struct vlan_dev_info *vlan = 
vlan_dev_info(dev); -	return dev_ethtool_get_rx_csum(vlan->real_dev); +	struct vlan_pcpu_stats *p; +	u32 rx_errors = 0, tx_dropped = 0; +	int i; + +	for_each_possible_cpu(i) { +		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; +		unsigned int start; + +		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); +		do { +			start = u64_stats_fetch_begin_irq(&p->syncp); +			rxpackets	= p->rx_packets; +			rxbytes		= p->rx_bytes; +			rxmulticast	= p->rx_multicast; +			txpackets	= p->tx_packets; +			txbytes		= p->tx_bytes; +		} while (u64_stats_fetch_retry_irq(&p->syncp, start)); + +		stats->rx_packets	+= rxpackets; +		stats->rx_bytes		+= rxbytes; +		stats->multicast	+= rxmulticast; +		stats->tx_packets	+= txpackets; +		stats->tx_bytes		+= txbytes; +		/* rx_errors & tx_dropped are u32 */ +		rx_errors	+= p->rx_errors; +		tx_dropped	+= p->tx_dropped; +	} +	stats->rx_errors  = rx_errors; +	stats->tx_dropped = tx_dropped; + +	return stats;  } -static u32 vlan_ethtool_get_flags(struct net_device *dev) +#ifdef CONFIG_NET_POLL_CONTROLLER +static void vlan_dev_poll_controller(struct net_device *dev)  { -	const struct vlan_dev_info *vlan = vlan_dev_info(dev); -	return dev_ethtool_get_flags(vlan->real_dev); +	return;  } -static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)  { +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev); +	struct net_device *real_dev = vlan->real_dev; +	struct netpoll *netpoll; +	int err = 0; -	if (vlan_dev_info(dev)->vlan_pcpu_stats) { -		struct vlan_pcpu_stats *p; -		u32 rx_errors = 0, tx_dropped = 0; -		int i; - -		for_each_possible_cpu(i) { -			u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; -			unsigned int start; - -			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i); -			do { -				start = u64_stats_fetch_begin_bh(&p->syncp); -				rxpackets	= p->rx_packets; -				rxbytes		= p->rx_bytes; -				rxmulticast	= p->rx_multicast; -				txpackets	= p->tx_packets; -				txbytes		= p->tx_bytes; -			} while (u64_stats_fetch_retry_bh(&p->syncp, start)); - -			stats->rx_packets	+= rxpackets; -			stats->rx_bytes		+= rxbytes; -			stats->multicast	+= rxmulticast; -			stats->tx_packets	+= txpackets; -			stats->tx_bytes		+= txbytes; -			/* rx_errors & tx_dropped are u32 */ -			rx_errors	+= p->rx_errors; -			tx_dropped	+= p->tx_dropped; -		} -		stats->rx_errors  = rx_errors; -		stats->tx_dropped = tx_dropped; +	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); +	err = -ENOMEM; +	if (!netpoll) +		goto out; + +	err = __netpoll_setup(netpoll, real_dev); +	if (err) { +		kfree(netpoll); +		goto out;  	} -	return stats; + +	vlan->netpoll = netpoll; + +out: +	return err;  } -static int vlan_ethtool_set_tso(struct net_device *dev, u32 data) +static void vlan_dev_netpoll_cleanup(struct net_device *dev)  { -       if (data) { -		struct net_device *real_dev = vlan_dev_info(dev)->real_dev; +	struct vlan_dev_priv *vlan= vlan_dev_priv(dev); +	struct netpoll *netpoll = vlan->netpoll; -		/* Underlying device must support TSO for VLAN-tagged packets -		 * and must have TSO enabled now. 
-		 */ -		if (!(real_dev->vlan_features & NETIF_F_TSO)) -			return -EOPNOTSUPP; -		if (!(real_dev->features & NETIF_F_TSO)) -			return -EINVAL; -		dev->features |= NETIF_F_TSO; -	} else { -		dev->features &= ~NETIF_F_TSO; -	} -	return 0; +	if (!netpoll) +		return; + +	vlan->netpoll = NULL; + +	__netpoll_free_async(netpoll);  } +#endif /* CONFIG_NET_POLL_CONTROLLER */  static const struct ethtool_ops vlan_ethtool_ops = {  	.get_settings	        = vlan_ethtool_get_settings,  	.get_drvinfo	        = vlan_ethtool_get_drvinfo,  	.get_link		= ethtool_op_get_link, -	.get_rx_csum		= vlan_ethtool_get_rx_csum, -	.get_flags		= vlan_ethtool_get_flags, -	.set_tso                = vlan_ethtool_set_tso,  };  static const struct net_device_ops vlan_netdev_ops = { @@ -847,30 +762,46 @@ static const struct net_device_ops vlan_netdev_ops = {  	.ndo_validate_addr	= eth_validate_addr,  	.ndo_set_mac_address	= vlan_dev_set_mac_address,  	.ndo_set_rx_mode	= vlan_dev_set_rx_mode, -	.ndo_set_multicast_list	= vlan_dev_set_rx_mode,  	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,  	.ndo_do_ioctl		= vlan_dev_ioctl,  	.ndo_neigh_setup	= vlan_dev_neigh_setup,  	.ndo_get_stats64	= vlan_dev_get_stats64, -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#if IS_ENABLED(CONFIG_FCOE)  	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,  	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,  	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,  	.ndo_fcoe_disable	= vlan_dev_fcoe_disable,  	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn, +	.ndo_fcoe_ddp_target	= vlan_dev_fcoe_ddp_target, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER +	.ndo_poll_controller	= vlan_dev_poll_controller, +	.ndo_netpoll_setup	= vlan_dev_netpoll_setup, +	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,  #endif +	.ndo_fix_features	= vlan_dev_fix_features, +	.ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,  }; +static void vlan_dev_free(struct net_device *dev) +{ +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + +	free_percpu(vlan->vlan_pcpu_stats); +	vlan->vlan_pcpu_stats = NULL; +	free_netdev(dev); +} +  void vlan_setup(struct net_device *dev)  {  	ether_setup(dev);  	dev->priv_flags		|= IFF_802_1Q_VLAN; -	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE; +	dev->priv_flags		&= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);  	dev->tx_queue_len	= 0;  	dev->netdev_ops		= &vlan_netdev_ops; -	dev->destructor		= free_netdev; +	dev->destructor		= vlan_dev_free;  	dev->ethtool_ops	= &vlan_ethtool_ops;  	memset(dev->broadcast, 0, ETH_ALEN); diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c index 061ceceeef1..66a80320b03 100644 --- a/net/8021q/vlan_gvrp.c +++ b/net/8021q/vlan_gvrp.c @@ -29,18 +29,22 @@ static struct garp_application vlan_gvrp_app __read_mostly = {  int vlan_gvrp_request_join(const struct net_device *dev)  { -	const struct vlan_dev_info *vlan = vlan_dev_info(dev); +	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	__be16 vlan_id = htons(vlan->vlan_id); +	if (vlan->vlan_proto != htons(ETH_P_8021Q)) +		return 0;  	return garp_request_join(vlan->real_dev, &vlan_gvrp_app,  				 &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);  }  void vlan_gvrp_request_leave(const struct net_device *dev)  { -	const struct vlan_dev_info *vlan = vlan_dev_info(dev); +	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	__be16 vlan_id = htons(vlan->vlan_id); +	if (vlan->vlan_proto != htons(ETH_P_8021Q)) +		return;  	garp_request_leave(vlan->real_dev, &vlan_gvrp_app,  			   &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);  } diff --git a/net/8021q/vlan_mvrp.c 
b/net/8021q/vlan_mvrp.c new file mode 100644 index 00000000000..e0fe091801b --- /dev/null +++ b/net/8021q/vlan_mvrp.c @@ -0,0 +1,76 @@ +/* + *	IEEE 802.1Q Multiple VLAN Registration Protocol (MVRP) + * + *	Copyright (c) 2012 Massachusetts Institute of Technology + * + *	Adapted from code in net/8021q/vlan_gvrp.c + *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net> + * + *	This program is free software; you can redistribute it and/or + *	modify it under the terms of the GNU General Public License + *	version 2 as published by the Free Software Foundation. + */ +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <net/mrp.h> +#include "vlan.h" + +#define MRP_MVRP_ADDRESS	{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 } + +enum mvrp_attributes { +	MVRP_ATTR_INVALID, +	MVRP_ATTR_VID, +	__MVRP_ATTR_MAX +}; +#define MVRP_ATTR_MAX	(__MVRP_ATTR_MAX - 1) + +static struct mrp_application vlan_mrp_app __read_mostly = { +	.type		= MRP_APPLICATION_MVRP, +	.maxattr	= MVRP_ATTR_MAX, +	.pkttype.type	= htons(ETH_P_MVRP), +	.group_address	= MRP_MVRP_ADDRESS, +	.version	= 0, +}; + +int vlan_mvrp_request_join(const struct net_device *dev) +{ +	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); +	__be16 vlan_id = htons(vlan->vlan_id); + +	if (vlan->vlan_proto != htons(ETH_P_8021Q)) +		return 0; +	return mrp_request_join(vlan->real_dev, &vlan_mrp_app, +				&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID); +} + +void vlan_mvrp_request_leave(const struct net_device *dev) +{ +	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); +	__be16 vlan_id = htons(vlan->vlan_id); + +	if (vlan->vlan_proto != htons(ETH_P_8021Q)) +		return; +	mrp_request_leave(vlan->real_dev, &vlan_mrp_app, +			  &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID); +} + +int vlan_mvrp_init_applicant(struct net_device *dev) +{ +	return mrp_init_applicant(dev, &vlan_mrp_app); +} + +void vlan_mvrp_uninit_applicant(struct net_device *dev) +{ +	mrp_uninit_applicant(dev, &vlan_mrp_app); +} + +int __init vlan_mvrp_init(void) +{ +	return mrp_register_application(&vlan_mrp_app); +} + +void vlan_mvrp_uninit(void) +{ +	mrp_unregister_application(&vlan_mrp_app); +} diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index be9a5c19a77..8ac8a5cc214 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -11,6 +11,7 @@  #include <linux/kernel.h>  #include <linux/netdevice.h>  #include <linux/if_vlan.h> +#include <linux/module.h>  #include <net/net_namespace.h>  #include <net/netlink.h>  #include <net/rtnetlink.h> @@ -22,6 +23,7 @@ static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {  	[IFLA_VLAN_FLAGS]	= { .len = sizeof(struct ifla_vlan_flags) },  	[IFLA_VLAN_EGRESS_QOS]	= { .type = NLA_NESTED },  	[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED }, +	[IFLA_VLAN_PROTOCOL]	= { .type = NLA_U16 },  };  static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = { @@ -52,6 +54,16 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])  	if (!data)  		return -EINVAL; +	if (data[IFLA_VLAN_PROTOCOL]) { +		switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) { +		case htons(ETH_P_8021Q): +		case htons(ETH_P_8021AD): +			break; +		default: +			return -EPROTONOSUPPORT; +		} +	} +  	if (data[IFLA_VLAN_ID]) {  		id = nla_get_u16(data[IFLA_VLAN_ID]);  		if (id >= VLAN_VID_MASK) @@ -61,7 +73,7 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])  		flags = nla_data(data[IFLA_VLAN_FLAGS]);  		if ((flags->flags & flags->mask) &  		    ~(VLAN_FLAG_REORDER_HDR | 
VLAN_FLAG_GVRP | -		      VLAN_FLAG_LOOSE_BINDING)) +		      VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))  			return -EINVAL;  	} @@ -104,8 +116,9 @@ static int vlan_changelink(struct net_device *dev,  static int vlan_newlink(struct net *src_net, struct net_device *dev,  			struct nlattr *tb[], struct nlattr *data[])  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct net_device *real_dev; +	__be16 proto;  	int err;  	if (!data[IFLA_VLAN_ID]) @@ -117,11 +130,17 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,  	if (!real_dev)  		return -ENODEV; -	vlan->vlan_id  = nla_get_u16(data[IFLA_VLAN_ID]); -	vlan->real_dev = real_dev; -	vlan->flags    = VLAN_FLAG_REORDER_HDR; +	if (data[IFLA_VLAN_PROTOCOL]) +		proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]); +	else +		proto = htons(ETH_P_8021Q); -	err = vlan_check_real_dev(real_dev, vlan->vlan_id); +	vlan->vlan_proto = proto; +	vlan->vlan_id	 = nla_get_u16(data[IFLA_VLAN_ID]); +	vlan->real_dev	 = real_dev; +	vlan->flags	 = VLAN_FLAG_REORDER_HDR; + +	err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id);  	if (err < 0)  		return err; @@ -148,28 +167,32 @@ static inline size_t vlan_qos_map_size(unsigned int n)  static size_t vlan_get_size(const struct net_device *dev)  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev); -	return nla_total_size(2) +	/* IFLA_VLAN_ID */ -	       sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ +	return nla_total_size(2) +	/* IFLA_VLAN_PROTOCOL */ +	       nla_total_size(2) +	/* IFLA_VLAN_ID */ +	       nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */  	       vlan_qos_map_size(vlan->nr_ingress_mappings) +  	       vlan_qos_map_size(vlan->nr_egress_mappings);  }  static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)  { -	struct vlan_dev_info *vlan = vlan_dev_info(dev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);  	struct vlan_priority_tci_mapping *pm;  	struct ifla_vlan_flags f;  	struct ifla_vlan_qos_mapping m;  	struct nlattr *nest;  	unsigned int i; -	NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id); +	if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) || +	    nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id)) +		goto nla_put_failure;  	if (vlan->flags) {  		f.flags = vlan->flags;  		f.mask  = ~0; -		NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f); +		if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f)) +			goto nla_put_failure;  	}  	if (vlan->nr_ingress_mappings) {  		nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS); @@ -182,8 +205,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)  			m.from = i;  			m.to   = vlan->ingress_priority_map[i]; -			NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, -				sizeof(m), &m); +			if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, +				    sizeof(m), &m)) +				goto nla_put_failure;  		}  		nla_nest_end(skb, nest);  	} @@ -201,8 +225,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)  				m.from = pm->priority;  				m.to   = (pm->vlan_qos >> 13) & 0x7; -				NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, -					sizeof(m), &m); +				if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, +					    sizeof(m), &m)) +					goto nla_put_failure;  			}  		}  		nla_nest_end(skb, nest); @@ -217,7 +242,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {  	.kind		= "vlan",  	.maxtype	= IFLA_VLAN_MAX,  	.policy		= vlan_policy, -	.priv_size	= sizeof(struct 
vlan_dev_info), +	.priv_size	= sizeof(struct vlan_dev_priv),  	.setup		= vlan_setup,  	.validate	= vlan_validate,  	.newlink	= vlan_newlink, diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index d1314cf18ad..1d0e89213a2 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -17,6 +17,8 @@   * Jan 20, 1998        Ben Greear     Initial Version   *****************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +  #include <linux/module.h>  #include <linux/errno.h>  #include <linux/kernel.h> @@ -54,7 +56,7 @@ static const char name_conf[]	 = "config";  /*   *	Structures for interfacing with the /proc filesystem. - *	VLAN creates its own directory /proc/net/vlan with the folowing + *	VLAN creates its own directory /proc/net/vlan with the following   *	entries:   *	config		device status/configuration   *	<device>	entry for each  device @@ -91,7 +93,7 @@ static const struct file_operations vlan_fops = {  static int vlandev_seq_open(struct inode *inode, struct file *file)  { -	return single_open(file, vlandev_seq_show, PDE(inode)->data); +	return single_open(file, vlandev_seq_show, PDE_DATA(inode));  }  static const struct file_operations vlandev_fops = { @@ -103,7 +105,7 @@ static const struct file_operations vlandev_fops = {  };  /* - * Proc filesystem derectory entries. + * Proc filesystem directory entries.   */  /* Strings */ @@ -129,7 +131,7 @@ void vlan_proc_cleanup(struct net *net)  		remove_proc_entry(name_conf, vn->proc_vlan_dir);  	if (vn->proc_vlan_dir) -		proc_net_remove(net, name_root); +		remove_proc_entry(name_root, net->proc_net);  	/* Dynamically added entries should be cleaned up as their vlan_device  	 * is removed, so we should not have to take care of it here... @@ -155,7 +157,7 @@ int __net_init vlan_proc_init(struct net *net)  	return 0;  err: -	pr_err("%s: can't create entry in proc filesystem!\n", __func__); +	pr_err("can't create entry in proc filesystem!\n");  	vlan_proc_cleanup(net);  	return -ENOBUFS;  } @@ -166,13 +168,13 @@ err:  int vlan_proc_add_dev(struct net_device *vlandev)  { -	struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); +	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);  	struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); -	dev_info->dent = +	vlan->dent =  		proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,  				 vn->proc_vlan_dir, &vlandev_fops, vlandev); -	if (!dev_info->dent) +	if (!vlan->dent)  		return -ENOBUFS;  	return 0;  } @@ -182,14 +184,9 @@ int vlan_proc_add_dev(struct net_device *vlandev)   */  int vlan_proc_rem_dev(struct net_device *vlandev)  { -	struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); -  	/** NOTE:  This will consume the memory pointed to by dent, it seems. */ -	if (vlan_dev_info(vlandev)->dent) { -		remove_proc_entry(vlan_dev_info(vlandev)->dent->name, -				  vn->proc_vlan_dir); -		vlan_dev_info(vlandev)->dent = NULL; -	} +	proc_remove(vlan_dev_priv(vlandev)->dent); +	vlan_dev_priv(vlandev)->dent = NULL;  	return 0;  } @@ -229,7 +226,7 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)  	++*pos; -	dev = (struct net_device *)v; +	dev = v;  	if (v == SEQ_START_TOKEN)  		dev = net_device_entry(&net->dev_base_head); @@ -266,10 +263,10 @@ static int vlan_seq_show(struct seq_file *seq, void *v)  			   nmtype ? 
nmtype :  "UNKNOWN");
 	} else {
 		const struct net_device *vlandev = v;
-		const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+		const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 		seq_printf(seq, "%-15s| %d  | %s\n",  vlandev->name,
-			   dev_info->vlan_id,    dev_info->real_dev->name);
+			   vlan->vlan_id,    vlan->real_dev->name);
 	}
 	return 0;
 }
@@ -277,7 +274,7 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
 static int vlandev_seq_show(struct seq_file *seq, void *offset)
 {
 	struct net_device *vlandev = (struct net_device *) seq->private;
-	const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 	struct rtnl_link_stats64 temp;
 	const struct rtnl_link_stats64 *stats;
 	static const char fmt64[] = "%30s %12llu\n";
@@ -289,8 +286,8 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
 	stats = dev_get_stats(vlandev, &temp);
 	seq_printf(seq,
 		   "%s  VID: %d	 REORDER_HDR: %i  dev->priv_flags: %hx\n",
-		   vlandev->name, dev_info->vlan_id,
-		   (int)(dev_info->flags & 1), vlandev->priv_flags);
+		   vlandev->name, vlan->vlan_id,
+		   (int)(vlan->flags & 1), vlandev->priv_flags);
 	seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
 	seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
@@ -298,23 +295,23 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
 	seq_puts(seq, "\n");
 	seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
 	seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
-	seq_printf(seq, "Device: %s", dev_info->real_dev->name);
+	seq_printf(seq, "Device: %s", vlan->real_dev->name);
 	/* now show all PRIORITY mappings relating to this VLAN */
 	seq_printf(seq, "\nINGRESS priority mappings: "
 			"0:%u  1:%u  2:%u  3:%u  4:%u  5:%u  6:%u 7:%u\n",
-		   dev_info->ingress_priority_map[0],
-		   dev_info->ingress_priority_map[1],
-		   dev_info->ingress_priority_map[2],
-		   dev_info->ingress_priority_map[3],
-		   dev_info->ingress_priority_map[4],
-		   dev_info->ingress_priority_map[5],
-		   dev_info->ingress_priority_map[6],
-		   dev_info->ingress_priority_map[7]);
+		   vlan->ingress_priority_map[0],
+		   vlan->ingress_priority_map[1],
+		   vlan->ingress_priority_map[2],
+		   vlan->ingress_priority_map[3],
+		   vlan->ingress_priority_map[4],
+		   vlan->ingress_priority_map[5],
+		   vlan->ingress_priority_map[6],
+		   vlan->ingress_priority_map[7]);
 	seq_printf(seq, " EGRESS priority mappings: ");
 	for (i = 0; i < 16; i++) {
 		const struct vlan_priority_tci_mapping *mp
-			= dev_info->egress_priority_map[i];
+			= vlan->egress_priority_map[i];
 		while (mp) {
 			seq_printf(seq, "%u:%hu ",
 				   mp->priority, ((mp->vlan_qos >> 13) & 0x7));
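
Note on the egress-QoS barrier: the smp_wmb() added in vlan_dev_set_egress_priority() at the top of this patch pairs with an smp_rmb() on the lookup side, as the new comment says. A minimal sketch of what that reader is assumed to look like follows; the helper name vlan_dev_get_egress_qos_mask() comes from the comment itself, the field names come from the hunks above, but the body below is illustrative and the "_sketch" suffix marks it as not being part of the patch.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include "vlan.h"

static inline u32 vlan_dev_get_egress_qos_mask_sketch(struct net_device *dev,
						       u32 skprio)
{
	struct vlan_priority_tci_mapping *mp;

	/* Pairs with the smp_wmb() in vlan_dev_set_egress_priority(): a
	 * reader that observes the new hash slot also observes the fully
	 * initialised mapping entry behind it.
	 */
	smp_rmb();

	mp = vlan_dev_priv(dev)->egress_priority_map[skprio & 0xF];
	while (mp) {
		if (mp->priority == skprio)
			return mp->vlan_qos;	/* already shifted into the TCI priority bits */
		mp = mp->next;
	}
	return 0;
}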

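Note on the per-CPU statistics: vlan_dev_get_stats64() above folds the per-CPU counters into rtnl_link_stats64 with a u64_stats fetch/retry loop. The matching update side lives in the VLAN rx/tx paths, which are outside these hunks; the sketch below only illustrates the locking pattern that loop pairs with, reusing the vlan_pcpu_stats field names read above (the function name and its parameters are hypothetical).

/* Illustrative writer for the u64_stats seqcount read in
 * vlan_dev_get_stats64(); assumes the vlan_pcpu_stats layout used above.
 */
static void vlan_rx_account_sketch(struct net_device *dev, unsigned int len,
				   bool multicast)
{
	struct vlan_pcpu_stats *stats;

	stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	if (multicast)
		stats->rx_multicast++;
	u64_stats_update_end(&stats->syncp);
}

The reader tolerates concurrent updates because u64_stats_fetch_retry_irq() restarts the copy whenever an update raced with it; rx_errors and tx_dropped are summed outside the seqcount since, as the comment in the hunk notes, they are plain u32 counters.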