Diffstat (limited to 'drivers/net/macvtap.c')
-rw-r--r--  drivers/net/macvtap.c  715
1 file changed, 530 insertions(+), 185 deletions(-)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 42567279843..3381c4f91a8 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1,5 +1,6 @@
#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
@@ -10,9 +11,9 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cdev.h>
+#include <linux/idr.h>
#include <linux/fs.h>
#include <net/net_namespace.h>
@@ -29,19 +30,18 @@
* macvtap_proto is used to allocate queues through the sock allocation
* mechanism.
*
- * TODO: multiqueue support is currently not implemented, even though
- * macvtap is basically prepared for that. We will need to add this
- * here as well as in virtio-net and qemu to get line rate on 10gbit
- * adapters from a guest.
*/
struct macvtap_queue {
struct sock sk;
struct socket sock;
struct socket_wq wq;
int vnet_hdr_sz;
- struct macvlan_dev *vlan;
+ struct macvlan_dev __rcu *vlan;
struct file *file;
unsigned int flags;
+ u16 queue_index;
+ bool enabled;
+ struct list_head next;
};
static struct proto macvtap_proto = {
@@ -51,25 +51,34 @@ static struct proto macvtap_proto = {
};
/*
- * Minor number matches netdev->ifindex, so need a potentially
- * large value. This also makes it possible to split the
- * tap functionality out again in the future by offering it
- * from other drivers besides macvtap. As long as every device
- * only has one tap, the interface numbers assure that the
- * device nodes are unique.
+ * Variables for dealing with macvtap device numbers.
*/
static dev_t macvtap_major;
-#define MACVTAP_NUM_DEVS 65536
+#define MACVTAP_NUM_DEVS (1U << MINORBITS)
+static DEFINE_MUTEX(minor_lock);
+static DEFINE_IDR(minor_idr);
+
+#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;
static const struct proto_ops macvtap_socket_ops;
+#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+ NETIF_F_TSO6 | NETIF_F_UFO)
+#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+
+static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
/*
* RCU usage:
* The macvtap_queue and the macvlan_dev are loosely coupled, the
* pointers from one to the other can only be read while rcu_read_lock
- * or macvtap_lock is held.
+ * or rtnl is held.
*
* Both the file and the macvlan_dev hold a reference on the macvtap_queue
* through sock_hold(&q->sk). When the macvlan_dev goes away first,
@@ -81,53 +90,84 @@ static const struct proto_ops macvtap_socket_ops;
* file or the dev. The data structure is freed through __sk_free
* when both our references and any pending SKBs are gone.
*/
-static DEFINE_SPINLOCK(macvtap_lock);
-/*
- * get_slot: return a [unused/occupied] slot in vlan->taps[]:
- * - if 'q' is NULL, return the first empty slot;
- * - otherwise, return the slot this pointer occupies.
- */
-static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
+static int macvtap_enable_queue(struct net_device *dev, struct file *file,
+ struct macvtap_queue *q)
{
- int i;
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ int err = -EINVAL;
- for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
- if (rcu_dereference(vlan->taps[i]) == q)
- return i;
- }
+ ASSERT_RTNL();
+
+ if (q->enabled)
+ goto out;
- /* Should never happen */
- BUG_ON(1);
+ err = 0;
+ rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
+ q->queue_index = vlan->numvtaps;
+ q->enabled = true;
+
+ vlan->numvtaps++;
+out:
+ return err;
}
static int macvtap_set_queue(struct net_device *dev, struct file *file,
- struct macvtap_queue *q)
+ struct macvtap_queue *q)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- int index;
int err = -EBUSY;
- spin_lock(&macvtap_lock);
- if (vlan->numvtaps == MAX_MACVTAP_QUEUES)
+ rtnl_lock();
+ if (vlan->numqueues == MAX_MACVTAP_QUEUES)
goto out;
err = 0;
- index = get_slot(vlan, NULL);
rcu_assign_pointer(q->vlan, vlan);
- rcu_assign_pointer(vlan->taps[index], q);
+ rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
sock_hold(&q->sk);
q->file = file;
+ q->queue_index = vlan->numvtaps;
+ q->enabled = true;
file->private_data = q;
+ list_add_tail(&q->next, &vlan->queue_list);
vlan->numvtaps++;
+ vlan->numqueues++;
out:
- spin_unlock(&macvtap_lock);
+ rtnl_unlock();
return err;
}
+static int macvtap_disable_queue(struct macvtap_queue *q)
+{
+ struct macvlan_dev *vlan;
+ struct macvtap_queue *nq;
+
+ ASSERT_RTNL();
+ if (!q->enabled)
+ return -EINVAL;
+
+ vlan = rtnl_dereference(q->vlan);
+
+ if (vlan) {
+ int index = q->queue_index;
+ BUG_ON(index >= vlan->numvtaps);
+ nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
+ nq->queue_index = index;
+
+ rcu_assign_pointer(vlan->taps[index], nq);
+ RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
+ q->enabled = false;
+
+ vlan->numvtaps--;
+ }
+
+ return 0;
+}
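
The disable path keeps taps[] dense by moving the last active queue into the vacated slot, so steering by hash % numvtaps never hits a hole. A minimal userspace sketch of that swap-with-last idiom (illustrative names, not part of the patch):

/* Hedged sketch: O(1) removal from a dense pointer array,
 * the idiom macvtap_disable_queue() uses on taps[]. */
#include <stdio.h>

#define MAX_QUEUES 16

struct queue { int queue_index; };

static struct queue *taps[MAX_QUEUES];
static int numvtaps;

static void disable_queue(struct queue *q)
{
	int index = q->queue_index;
	struct queue *last = taps[numvtaps - 1];

	/* Move the last active queue into the hole so the array
	 * stays dense; indexing taps[hash % numvtaps] sees no gap. */
	last->queue_index = index;
	taps[index] = last;
	taps[--numvtaps] = NULL;
}

int main(void)
{
	struct queue a = { 0 }, b = { 1 }, c = { 2 };

	taps[0] = &a; taps[1] = &b; taps[2] = &c;
	numvtaps = 3;

	disable_queue(&b);	/* c takes slot 1 */
	printf("numvtaps=%d, taps[1]->queue_index=%d\n",
	       numvtaps, taps[1]->queue_index);
	return 0;
}

In the kernel version the two pointer updates are published with rcu_assign_pointer()/RCU_INIT_POINTER(), so a concurrent RCU reader sees either the old queue or its replacement, never a torn state.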
+
/*
* The file owning the queue got closed, give up both
* the reference that the file holds as well as the
@@ -140,18 +180,20 @@ static void macvtap_put_queue(struct macvtap_queue *q)
{
struct macvlan_dev *vlan;
- spin_lock(&macvtap_lock);
- vlan = rcu_dereference(q->vlan);
+ rtnl_lock();
+ vlan = rtnl_dereference(q->vlan);
+
if (vlan) {
- int index = get_slot(vlan, q);
+ if (q->enabled)
+ BUG_ON(macvtap_disable_queue(q));
- rcu_assign_pointer(vlan->taps[index], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ vlan->numqueues--;
+ RCU_INIT_POINTER(q->vlan, NULL);
sock_put(&q->sk);
- --vlan->numvtaps;
+ list_del_init(&q->next);
}
- spin_unlock(&macvtap_lock);
+ rtnl_unlock();
synchronize_rcu();
sock_put(&q->sk);
@@ -169,12 +211,24 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvtap_queue *tap = NULL;
- int numvtaps = vlan->numvtaps;
+ /* Access to the taps array is protected by RCU, but access to numvtaps
+ * isn't. Below we use it to look up a queue, but treat it as a hint
+ * and validate that the result isn't NULL, in case we are
+ * racing against queue removal.
+ */
+ int numvtaps = ACCESS_ONCE(vlan->numvtaps);
__u32 rxq;
if (!numvtaps)
goto out;
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_hash(skb);
+ if (rxq) {
+ tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+ goto out;
+ }
+
if (likely(skb_rx_queue_recorded(skb))) {
rxq = skb_get_rx_queue(skb);
@@ -182,25 +236,10 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
rxq -= numvtaps;
tap = rcu_dereference(vlan->taps[rxq]);
- if (tap)
- goto out;
- }
-
- /* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(skb);
- if (rxq) {
- tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
- if (tap)
- goto out;
- }
-
- /* Everything failed - find first available queue */
- for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
- tap = rcu_dereference(vlan->taps[rxq]);
- if (tap)
- break;
+ goto out;
}
+ tap = rcu_dereference(vlan->taps[0]);
out:
return tap;
}
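
Queue selection now hashes the flow and indexes modulo numvtaps, which is read without a lock. A hedged sketch of the lookup contract (userspace C, illustrative names):

/* Hedged sketch of flow-hash queue selection with numvtaps as an
 * unlocked hint, mirroring macvtap_get_queue(). */
#include <stdint.h>
#include <stddef.h>

struct queue;

struct queue *select_queue(struct queue *const *taps, int numvtaps,
			   uint32_t flow_hash)
{
	if (!numvtaps)
		return NULL;

	/* numvtaps is only a hint: a queue may be disabled between the
	 * counter read and the array access, so the caller must still
	 * check the result for NULL. */
	return taps[flow_hash % numvtaps];
}

As long as numvtaps is stable, every packet of a given flow lands on the same queue, which preserves per-flow ordering.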
@@ -213,61 +252,138 @@ out:
static void macvtap_del_queues(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES];
+ struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
int i, j = 0;
- /* macvtap_put_queue can free some slots, so go through all slots */
- spin_lock(&macvtap_lock);
- for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
- q = rcu_dereference(vlan->taps[i]);
- if (q) {
- qlist[j++] = q;
- rcu_assign_pointer(vlan->taps[i], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ ASSERT_RTNL();
+ list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
+ list_del_init(&q->next);
+ qlist[j++] = q;
+ RCU_INIT_POINTER(q->vlan, NULL);
+ if (q->enabled)
vlan->numvtaps--;
- }
+ vlan->numqueues--;
}
- BUG_ON(vlan->numvtaps != 0);
- spin_unlock(&macvtap_lock);
-
- synchronize_rcu();
+ for (i = 0; i < vlan->numvtaps; i++)
+ RCU_INIT_POINTER(vlan->taps[i], NULL);
+ BUG_ON(vlan->numvtaps);
+ BUG_ON(vlan->numqueues);
+ /* guarantee that any future macvtap_set_queue will fail */
+ vlan->numvtaps = MAX_MACVTAP_QUEUES;
for (--j; j >= 0; j--)
sock_put(&qlist[j]->sk);
}
-/*
- * Forward happens for data that gets sent from one macvlan
- * endpoint to another one in bridge mode. We just take
- * the skb and put it into the receive queue.
- */
-static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
- struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+ struct sk_buff *skb = *pskb;
+ struct net_device *dev = skb->dev;
+ struct macvlan_dev *vlan;
+ struct macvtap_queue *q;
+ netdev_features_t features = TAP_FEATURES;
+
+ vlan = macvtap_get_vlan_rcu(dev);
+ if (!vlan)
+ return RX_HANDLER_PASS;
+
+ q = macvtap_get_queue(dev, skb);
if (!q)
- goto drop;
+ return RX_HANDLER_PASS;
if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
goto drop;
- skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ skb_push(skb, ETH_HLEN);
+
+ /* Apply the forward feature mask so that we perform segmentation
+ * according to the user's wishes. This only works if VNET_HDR is
+ * enabled.
+ */
+ if (q->flags & IFF_VNET_HDR)
+ features |= vlan->tap_features;
+ if (netif_needs_gso(skb, features)) {
+ struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+
+ if (IS_ERR(segs))
+ goto drop;
+
+ if (!segs) {
+ skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ goto wake_up;
+ }
+
+ kfree_skb(skb);
+ while (segs) {
+ struct sk_buff *nskb = segs->next;
+
+ segs->next = NULL;
+ skb_queue_tail(&q->sk.sk_receive_queue, segs);
+ segs = nskb;
+ }
+ } else {
+ /* If we receive a partial checksum and the tap side
+ * doesn't support checksum offload, compute the checksum.
+ * Note: it doesn't matter which checksum feature to
+ * check, we either support them all or none.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !(features & NETIF_F_ALL_CSUM) &&
+ skb_checksum_help(skb))
+ goto drop;
+ skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ }
+
+wake_up:
wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
- return NET_RX_SUCCESS;
+ return RX_HANDLER_CONSUMED;
drop:
+ /* Count errors/drops only here, thus don't care about args. */
+ macvlan_count_rx(vlan, 0, 0, 0);
kfree_skb(skb);
- return NET_RX_DROP;
+ return RX_HANDLER_CONSUMED;
}
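
macvtap now hooks the receive path as an rx_handler instead of the old macvlan receive/forward callbacks. A minimal kernel-style sketch of the rx_handler contract, assuming a hypothetical example_priv registered through netdev_rx_handler_register():

/* Hedged sketch: struct example_priv and its queue are illustrative.
 * A registered handler runs under rcu_read_lock() for every frame on
 * the device and steers delivery via its return value. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_priv { struct sk_buff_head rxq; };

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct example_priv *priv = rcu_dereference(skb->dev->rx_handler_data);

	if (!priv)
		return RX_HANDLER_PASS;	/* let the normal stack see it */

	skb_queue_tail(&priv->rxq, skb);
	return RX_HANDLER_CONSUMED;	/* stack must not touch skb again */
}

Registration happens under rtnl, which is why macvtap_newlink() can call netdev_rx_handler_register() before macvlan_common_newlink() and rely on macvtap_dellink() to unregister.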
-/*
- * Receive is for data from the external interface (lowerdev),
- * in case of macvtap, we can treat that the same way as
- * forward, which macvlan cannot.
- */
-static int macvtap_receive(struct sk_buff *skb)
+static int macvtap_get_minor(struct macvlan_dev *vlan)
{
- skb_push(skb, ETH_HLEN);
- return macvtap_forward(skb->dev, skb);
+ int retval = -ENOMEM;
+
+ mutex_lock(&minor_lock);
+ retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
+ if (retval >= 0) {
+ vlan->minor = retval;
+ } else if (retval == -ENOSPC) {
+ printk(KERN_ERR "too many macvtap devices\n");
+ retval = -EINVAL;
+ }
+ mutex_unlock(&minor_lock);
+ return retval < 0 ? retval : 0;
+}
+
+static void macvtap_free_minor(struct macvlan_dev *vlan)
+{
+ mutex_lock(&minor_lock);
+ if (vlan->minor) {
+ idr_remove(&minor_idr, vlan->minor);
+ vlan->minor = 0;
+ }
+ mutex_unlock(&minor_lock);
+}
+
+static struct net_device *dev_get_by_macvtap_minor(int minor)
+{
+ struct net_device *dev = NULL;
+ struct macvlan_dev *vlan;
+
+ mutex_lock(&minor_lock);
+ vlan = idr_find(&minor_idr, minor);
+ if (vlan) {
+ dev = vlan->dev;
+ dev_hold(dev);
+ }
+ mutex_unlock(&minor_lock);
+ return dev;
}
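
The minor-number bookkeeping above is the standard mutex-plus-IDR registry pattern. A reduced kernel-style sketch (illustrative names, same idr_alloc()/idr_find() calls as the patch):

#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(registry_lock);
static DEFINE_IDR(registry);

/* Allocate an id in [1, MACVTAP_NUM_DEVS); 0 stays free so that
 * "minor == 0" can mean "no id assigned", as macvtap_free_minor()
 * relies on. */
static int registry_add(void *obj)
{
	int id;

	mutex_lock(&registry_lock);
	id = idr_alloc(&registry, obj, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	mutex_unlock(&registry_lock);
	return id;	/* >= 1 on success, -errno on failure */
}

static void *registry_lookup(int id)
{
	void *obj;

	mutex_lock(&registry_lock);
	obj = idr_find(&registry, id);
	mutex_unlock(&registry_lock);
	return obj;
}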
static int macvtap_newlink(struct net *src_net,
@@ -275,34 +391,30 @@ static int macvtap_newlink(struct net *src_net,
struct nlattr *tb[],
struct nlattr *data[])
{
- struct device *classdev;
- dev_t devt;
+ struct macvlan_dev *vlan = netdev_priv(dev);
int err;
- err = macvlan_common_newlink(src_net, dev, tb, data,
- macvtap_receive, macvtap_forward);
- if (err)
- goto out;
+ INIT_LIST_HEAD(&vlan->queue_list);
- devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);
+ /* Since macvlan supports all offloads by default, make
+ * tap support all offloads also.
+ */
+ vlan->tap_features = TUN_OFFLOADS;
- classdev = device_create(macvtap_class, &dev->dev, devt,
- dev, "tap%d", dev->ifindex);
- if (IS_ERR(classdev)) {
- err = PTR_ERR(classdev);
- macvtap_del_queues(dev);
- }
+ err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
+ if (err)
+ return err;
-out:
- return err;
+ /* Don't put anything that may fail after macvlan_common_newlink
+ * because we can't undo what it does.
+ */
+ return macvlan_common_newlink(src_net, dev, tb, data);
}
static void macvtap_dellink(struct net_device *dev,
struct list_head *head)
{
- device_destroy(macvtap_class,
- MKDEV(MAJOR(macvtap_major), dev->ifindex));
-
+ netdev_rx_handler_unregister(dev);
macvtap_del_queues(dev);
macvlan_dellink(dev, head);
}
@@ -334,10 +446,15 @@ static void macvtap_sock_write_space(struct sock *sk)
wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
+static void macvtap_sock_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
static int macvtap_open(struct inode *inode, struct file *file)
{
struct net *net = current->nsproxy->net_ns;
- struct net_device *dev = dev_get_by_index(net, iminor(inode));
+ struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
struct macvtap_queue *q;
int err;
@@ -345,18 +462,13 @@ static int macvtap_open(struct inode *inode, struct file *file)
if (!dev)
goto out;
- /* check if this is a macvtap device */
- err = -EINVAL;
- if (dev->rtnl_link_ops != &macvtap_link_ops)
- goto out;
-
err = -ENOMEM;
q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
&macvtap_proto);
if (!q)
goto out;
- q->sock.wq = &q->wq;
+ RCU_INIT_POINTER(q->sock.wq, &q->wq);
init_waitqueue_head(&q->wq.wait);
q->sock.type = SOCK_RAW;
q->sock.state = SS_CONNECTED;
@@ -364,9 +476,20 @@ static int macvtap_open(struct inode *inode, struct file *file)
q->sock.ops = &macvtap_socket_ops;
sock_init_data(&q->sock, &q->sk);
q->sk.sk_write_space = macvtap_sock_write_space;
+ q->sk.sk_destruct = macvtap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+ /*
+ * So far only KVM virtio_net uses macvtap; enable zero copy between
+ * guest kernel and host kernel when the lower device supports zero copy.
+ *
+ * Macvlan supports zero copy iff the lower device does, so we don't
+ * have to look at the lower device directly.
+ */
+ if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
+ sock_set_flag(&q->sk, SOCK_ZEROCOPY);
+
err = macvtap_set_queue(dev, file, q);
if (err)
sock_put(&q->sk);
@@ -419,7 +542,7 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- err);
+ err, 0);
if (!skb)
return NULL;
@@ -478,7 +601,7 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
return 0;
}
-static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
struct virtio_net_hdr *vnet_hdr)
{
memset(vnet_hdr, 0, sizeof(*vnet_hdr));
@@ -504,33 +627,36 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- vnet_hdr->csum_start = skb->csum_start -
- skb_headroom(skb);
+ vnet_hdr->csum_start = skb_checksum_start_offset(skb);
vnet_hdr->csum_offset = skb->csum_offset;
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
-
- return 0;
}
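
For readers following the vnet header plumbing: with IFF_VNET_HDR set (the default), each read() from the tap fd yields a struct virtio_net_hdr followed by the Ethernet frame. A hedged userspace sketch ("/dev/tap4" is an example node name):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/virtio_net.h>

int main(void)
{
	char buf[65536 + sizeof(struct virtio_net_hdr)];
	struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)buf;
	int fd = open("/dev/tap4", O_RDWR);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));
	if (n < (ssize_t)sizeof(*hdr))
		return 1;
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		printf("checksum at %u+%u still to be filled in\n",
		       (unsigned)hdr->csum_start, (unsigned)hdr->csum_offset);
	printf("frame of %zd bytes\n", n - sizeof(*hdr));
	close(fd);
	return 0;
}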
-
/* Get packet from user space buffer */
-static ssize_t macvtap_get_user(struct macvtap_queue *q,
- const struct iovec *iv, size_t count,
- int noblock)
+static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ const struct iovec *iv, unsigned long total_len,
+ size_t count, int noblock)
{
+ int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
struct sk_buff *skb;
struct macvlan_dev *vlan;
- size_t len = count;
+ unsigned long len = total_len;
int err;
struct virtio_net_hdr vnet_hdr = { 0 };
int vnet_hdr_len = 0;
+ int copylen = 0;
+ bool zerocopy = false;
+ size_t linear;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = q->vnet_hdr_sz;
err = -EINVAL;
- if ((len -= vnet_hdr_len) < 0)
+ if (len < vnet_hdr_len)
goto err;
+ len -= vnet_hdr_len;
err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
sizeof(vnet_hdr));
@@ -550,12 +676,44 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
if (unlikely(len < ETH_HLEN))
goto err;
- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len,
- noblock, &err);
+ err = -EMSGSIZE;
+ if (unlikely(count > UIO_MAXIOV))
+ goto err;
+
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+ if (copylen > good_linear)
+ copylen = good_linear;
+ linear = copylen;
+ if (iov_pages(iv, vnet_hdr_len + copylen, count)
+ <= MAX_SKB_FRAGS)
+ zerocopy = true;
+ }
+
+ if (!zerocopy) {
+ copylen = len;
+ if (vnet_hdr.hdr_len > good_linear)
+ linear = good_linear;
+ else
+ linear = vnet_hdr.hdr_len;
+ }
+
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+ linear, noblock, &err);
if (!skb)
goto err;
- err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len);
+ if (zerocopy)
+ err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+ else {
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
+ len);
+ if (!err && m && m->msg_control) {
+ struct ubuf_info *uarg = m->msg_control;
+ uarg->callback(uarg, false);
+ }
+ }
+
if (err)
goto err_kfree;
@@ -569,25 +727,35 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
goto err_kfree;
}
- rcu_read_lock_bh();
+ skb_probe_transport_header(skb, ETH_HLEN);
+
+ rcu_read_lock();
vlan = rcu_dereference(q->vlan);
- if (vlan)
- macvlan_start_xmit(skb, vlan->dev);
- else
+ /* copy skb_ubuf_info for callback when skb has no error */
+ if (zerocopy) {
+ skb_shinfo(skb)->destructor_arg = m->msg_control;
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ }
+ if (vlan) {
+ skb->dev = vlan->dev;
+ dev_queue_xmit(skb);
+ } else {
kfree_skb(skb);
- rcu_read_unlock_bh();
+ }
+ rcu_read_unlock();
- return count;
+ return total_len;
err_kfree:
kfree_skb(skb);
err:
- rcu_read_lock_bh();
+ rcu_read_lock();
vlan = rcu_dereference(q->vlan);
if (vlan)
- netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
- rcu_read_unlock_bh();
+ this_cpu_inc(vlan->pcpu_stats->tx_dropped);
+ rcu_read_unlock();
return err;
}
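
The transmit direction mirrors this: a write() must be prefixed by vnet_hdr_sz bytes of header, and an all-zero header requests no offloads. A hedged userspace sketch (assumes the default header size of sizeof(struct virtio_net_hdr)):

#include <string.h>
#include <sys/uio.h>
#include <linux/virtio_net.h>

ssize_t send_frame(int tapfd, const void *frame, size_t len)
{
	struct virtio_net_hdr hdr;
	struct iovec iov[2];

	memset(&hdr, 0, sizeof(hdr));	/* no checksum/GSO metadata */
	iov[0].iov_base = &hdr;
	iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = (void *)frame;
	iov[1].iov_len = len;
	return writev(tapfd, iov, 2);
}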
@@ -599,8 +767,8 @@ static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
ssize_t result = -ENOLINK;
struct macvtap_queue *q = file->private_data;
- result = macvtap_get_user(q, iv, iov_length(iv, count),
- file->f_flags & O_NONBLOCK);
+ result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
+ file->f_flags & O_NONBLOCK);
return result;
}
@@ -609,9 +777,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
const struct sk_buff *skb,
const struct iovec *iv, int len)
{
- struct macvlan_dev *vlan;
int ret;
int vnet_hdr_len = 0;
+ int vlan_offset = 0;
+ int copied, total;
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
@@ -619,38 +788,62 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
- ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
- if (ret)
- return ret;
+ macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
}
+ total = copied = vnet_hdr_len;
+ total += skb->len;
+
+ if (!vlan_tx_tag_present(skb))
+ len = min_t(int, skb->len, len);
+ else {
+ int copy;
+ struct {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ } veth;
+ veth.h_vlan_proto = skb->vlan_proto;
+ veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+
+ vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ len = min_t(int, skb->len + VLAN_HLEN, len);
+ total += VLAN_HLEN;
+
+ copy = min_t(int, vlan_offset, len);
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+ len -= copy;
+ copied += copy;
+ if (ret || !len)
+ goto done;
+
+ copy = min_t(int, sizeof(veth), len);
+ ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
+ len -= copy;
+ copied += copy;
+ if (ret || !len)
+ goto done;
+ }
- len = min_t(int, skb->len, len);
-
- ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
-
- rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
- if (vlan)
- macvlan_count_rx(vlan, len, ret == 0, 0);
- rcu_read_unlock_bh();
+ ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
- return ret ? ret : (len + vnet_hdr_len);
+done:
+ return ret ? ret : total;
}
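
The new code splices an accelerated VLAN tag back into the byte stream between the MAC addresses and the EtherType, hence vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto) = 12. The same operation on a flat buffer, as a hedged userspace sketch:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define VLAN_TAG_OFFSET 12	/* dst MAC (6) + src MAC (6): where the
				 * 802.1Q tag sits in the wire format */

/* out must have room for len + 4 bytes */
size_t insert_vlan_tag(unsigned char *out, const unsigned char *frame,
		       size_t len, uint16_t tpid, uint16_t tci)
{
	uint16_t proto = htons(tpid);
	uint16_t tag = htons(tci);

	memcpy(out, frame, VLAN_TAG_OFFSET);		/* MAC addresses */
	memcpy(out + VLAN_TAG_OFFSET, &proto, 2);	/* e.g. 0x8100 */
	memcpy(out + VLAN_TAG_OFFSET + 2, &tag, 2);	/* PCP/DEI/VID */
	memcpy(out + VLAN_TAG_OFFSET + 4, frame + VLAN_TAG_OFFSET,
	       len - VLAN_TAG_OFFSET);			/* EtherType onward */
	return len + 4;
}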
-static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+static ssize_t macvtap_do_read(struct macvtap_queue *q,
const struct iovec *iv, unsigned long len,
int noblock)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
struct sk_buff *skb;
ssize_t ret = 0;
- add_wait_queue(sk_sleep(&q->sk), &wait);
while (len) {
- current->state = TASK_INTERRUPTIBLE;
+ if (!noblock)
+ prepare_to_wait(sk_sleep(&q->sk), &wait,
+ TASK_INTERRUPTIBLE);
/* Read frames from the queue */
skb = skb_dequeue(&q->sk.sk_receive_queue);
@@ -672,8 +865,8 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
break;
}
- current->state = TASK_RUNNING;
- remove_wait_queue(sk_sleep(&q->sk), &wait);
+ if (!noblock)
+ finish_wait(sk_sleep(&q->sk), &wait);
return ret;
}
@@ -690,12 +883,103 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
- ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
- ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
+ ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len);
+ if (ret > 0)
+ iocb->ki_pos = ret;
out:
return ret;
}
+static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
+{
+ struct macvlan_dev *vlan;
+
+ ASSERT_RTNL();
+ vlan = rtnl_dereference(q->vlan);
+ if (vlan)
+ dev_hold(vlan->dev);
+
+ return vlan;
+}
+
+static void macvtap_put_vlan(struct macvlan_dev *vlan)
+{
+ dev_put(vlan->dev);
+}
+
+static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
+{
+ struct macvtap_queue *q = file->private_data;
+ struct macvlan_dev *vlan;
+ int ret;
+
+ vlan = macvtap_get_vlan(q);
+ if (!vlan)
+ return -EINVAL;
+
+ if (flags & IFF_ATTACH_QUEUE)
+ ret = macvtap_enable_queue(vlan->dev, file, q);
+ else if (flags & IFF_DETACH_QUEUE)
+ ret = macvtap_disable_queue(q);
+ else
+ ret = -EINVAL;
+
+ macvtap_put_vlan(vlan);
+ return ret;
+}
+
+static int set_offload(struct macvtap_queue *q, unsigned long arg)
+{
+ struct macvlan_dev *vlan;
+ netdev_features_t features;
+ netdev_features_t feature_mask = 0;
+
+ vlan = rtnl_dereference(q->vlan);
+ if (!vlan)
+ return -ENOLINK;
+
+ features = vlan->dev->features;
+
+ if (arg & TUN_F_CSUM) {
+ feature_mask = NETIF_F_HW_CSUM;
+
+ if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
+ if (arg & TUN_F_TSO_ECN)
+ feature_mask |= NETIF_F_TSO_ECN;
+ if (arg & TUN_F_TSO4)
+ feature_mask |= NETIF_F_TSO;
+ if (arg & TUN_F_TSO6)
+ feature_mask |= NETIF_F_TSO6;
+ }
+
+ if (arg & TUN_F_UFO)
+ feature_mask |= NETIF_F_UFO;
+ }
+
+ /* tun/tap driver inverts the usage for TSO offloads, where
+ * setting the TSO bit means that the userspace wants to
+ * accept TSO frames and turning it off means that user space
+ * does not support TSO.
+ * For macvtap, we have to invert it to mean the same thing.
+ * When user space turns off TSO, we turn off GSO/LRO so that
+ * user-space will not receive TSO frames.
+ */
+ if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
+ features |= RX_OFFLOADS;
+ else
+ features &= ~RX_OFFLOADS;
+
+ /* tap_features are the same as features on tun/tap and
+ * reflect user expectations.
+ */
+ vlan->tap_features = feature_mask;
+ vlan->set_features = features;
+ netdev_update_features(vlan->dev);
+
+ return 0;
+}
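
From user space this is driven by TUNSETOFFLOAD, the same knob the tun driver exposes. A hedged usage sketch:

#include <sys/ioctl.h>
#include <linux/if_tun.h>

/* Advertise "I can handle checksum-offloaded and TSO frames"; the
 * kernel maps this onto tap_features and RX_OFFLOADS in set_offload(). */
int enable_tso(int tapfd)
{
	unsigned int offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	return ioctl(tapfd, TUNSETOFFLOAD, offloads);
}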
+
/*
* provide compatibility with generic tun/tap interface
*/
@@ -719,7 +1003,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
ret = 0;
- if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
+ if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
+ (IFF_NO_PI | IFF_TAP))
ret = -EINVAL;
else
q->flags = u;
@@ -727,24 +1012,32 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return ret;
case TUNGETIFF:
- rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
- if (vlan)
- dev_hold(vlan->dev);
- rcu_read_unlock_bh();
-
- if (!vlan)
+ rtnl_lock();
+ vlan = macvtap_get_vlan(q);
+ if (!vlan) {
+ rtnl_unlock();
return -ENOLINK;
+ }
ret = 0;
- if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+ if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
put_user(q->flags, &ifr->ifr_flags))
ret = -EFAULT;
- dev_put(vlan->dev);
+ macvtap_put_vlan(vlan);
+ rtnl_unlock();
+ return ret;
+
+ case TUNSETQUEUE:
+ if (get_user(u, &ifr->ifr_flags))
+ return -EFAULT;
+ rtnl_lock();
+ ret = macvtap_ioctl_set_queue(file, u);
+ rtnl_unlock();
return ret;
case TUNGETFEATURES:
- if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up))
+ if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
+ IFF_MULTI_QUEUE, up))
return -EFAULT;
return 0;
@@ -776,11 +1069,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
- /* TODO: only accept frames with the features that
- got enabled for forwarded frames */
- if (!(q->flags & IFF_VNET_HDR))
- return -EINVAL;
- return 0;
+ rtnl_lock();
+ ret = set_offload(q, arg);
+ rtnl_unlock();
+ return ret;
default:
return -EINVAL;
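
A queue fd obtained on the tap node can now be parked and re-armed with the new TUNSETQUEUE ioctl. A hedged userspace sketch of attach/detach:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int set_queue_state(int queue_fd, int attach)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(queue_fd, TUNSETQUEUE, &ifr);
}

A detached queue keeps its fd and its socket; it simply drops out of the taps[] steering array until it is re-attached.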
@@ -813,7 +1105,7 @@ static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
- return macvtap_get_user(q, m->msg_iov, total_len,
+ return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
m->msg_flags & MSG_DONTWAIT);
}
@@ -825,7 +1117,7 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
+ ret = macvtap_do_read(q, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
@@ -856,6 +1148,52 @@ struct socket *macvtap_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
+static int macvtap_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct macvlan_dev *vlan;
+ struct device *classdev;
+ dev_t devt;
+ int err;
+
+ if (dev->rtnl_link_ops != &macvtap_link_ops)
+ return NOTIFY_DONE;
+
+ vlan = netdev_priv(dev);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ /* Create the device node here after the network device has
+ * been registered but before register_netdevice has
+ * finished running.
+ */
+ err = macvtap_get_minor(vlan);
+ if (err)
+ return notifier_from_errno(err);
+
+ devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+ classdev = device_create(macvtap_class, &dev->dev, devt,
+ dev, "tap%d", dev->ifindex);
+ if (IS_ERR(classdev)) {
+ macvtap_free_minor(vlan);
+ return notifier_from_errno(PTR_ERR(classdev));
+ }
+ break;
+ case NETDEV_UNREGISTER:
+ devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+ device_destroy(macvtap_class, devt);
+ macvtap_free_minor(vlan);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
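
Since the notifier names the node "tap%d" with the ifindex, user space can derive the device path from the interface name. A hedged sketch (assumes udev has created the /dev node):

#include <fcntl.h>
#include <stdio.h>
#include <net/if.h>

int open_macvtap(const char *ifname)
{
	char path[IFNAMSIZ + 16];
	unsigned int idx = if_nametoindex(ifname);

	if (!idx)
		return -1;
	snprintf(path, sizeof(path), "/dev/tap%u", idx);
	return open(path, O_RDWR);
}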
+
+static struct notifier_block macvtap_notifier_block __read_mostly = {
+ .notifier_call = macvtap_device_event,
+};
+
static int macvtap_init(void)
{
int err;
@@ -876,12 +1214,18 @@ static int macvtap_init(void)
goto out3;
}
- err = macvlan_link_register(&macvtap_link_ops);
+ err = register_netdevice_notifier(&macvtap_notifier_block);
if (err)
goto out4;
+ err = macvlan_link_register(&macvtap_link_ops);
+ if (err)
+ goto out5;
+
return 0;
+out5:
+ unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
class_unregister(macvtap_class);
out3:
@@ -896,6 +1240,7 @@ module_init(macvtap_init);
static void macvtap_exit(void)
{
rtnl_link_unregister(&macvtap_link_ops);
+ unregister_netdevice_notifier(&macvtap_notifier_block);
class_unregister(macvtap_class);
cdev_del(&macvtap_cdev);
unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);