-rw-r--r--  drivers/net/Kconfig                 |   38
-rw-r--r--  drivers/net/Makefile                |    1
-rw-r--r--  drivers/net/xen-netback/Makefile    |    3
-rw-r--r--  drivers/net/xen-netback/common.h    |  161
-rw-r--r--  drivers/net/xen-netback/interface.c |  424
-rw-r--r--  drivers/net/xen-netback/netback.c   | 1745
-rw-r--r--  drivers/net/xen-netback/xenbus.c    |  490
-rw-r--r--  drivers/net/xen-netfront.c          |   20
-rw-r--r--  include/xen/interface/io/netif.h    |   80
9 files changed, 2908 insertions(+), 54 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 46e1b1a28b8..797d6819672 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2953,12 +2953,38 @@ config XEN_NETDEV_FRONTEND
 	select XEN_XENBUS_FRONTEND
 	default y
 	help
-	  The network device frontend driver allows the kernel to
-	  access network devices exported exported by a virtual
-	  machine containing a physical network device driver. The
-	  frontend driver is intended for unprivileged guest domains;
-	  if you are compiling a kernel for a Xen guest, you almost
-	  certainly want to enable this.
+	  This driver provides support for Xen paravirtual network
+	  devices exported by a Xen network driver domain (often
+	  domain 0).
+
+	  The corresponding Linux backend driver is enabled by the
+	  CONFIG_XEN_NETDEV_BACKEND option.
+
+	  If you are compiling a kernel for use as a Xen guest, you
+	  should say Y here. To compile this driver as a module,
+	  choose M here: the module will be called xen-netfront.
+
+config XEN_NETDEV_BACKEND
+	tristate "Xen backend network device"
+	depends on XEN_BACKEND
+	help
+	  This driver allows the kernel to act as a Xen network driver
+	  domain which exports paravirtual network devices to other
+	  Xen domains. These devices can be accessed by any operating
+	  system that implements a compatible front end.
+
+	  The corresponding Linux frontend driver is enabled by the
+	  CONFIG_XEN_NETDEV_FRONTEND configuration option.
+
+	  The backend driver presents a standard network device
+	  endpoint for each paravirtual network device to the driver
+	  domain network stack. These can then be bridged, routed,
+	  etc., in order to provide full network connectivity.
+
+	  If you are compiling a kernel to run in a Xen network driver
+	  domain (often this is domain 0) you should say Y here. To
+	  compile this driver as a module, choose M here: the module
+	  will be called xen-netback.
 
 config ISERIES_VETH
 	tristate "iSeries Virtual Ethernet driver support"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7c2171179f9..01b604ad155 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -172,6 +172,7 @@ obj-$(CONFIG_SLIP) += slip.o
 obj-$(CONFIG_SLHC) += slhc.o
 
 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
+obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
 
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_IFB) += ifb.o
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
new file mode 100644
index 00000000000..e346e8125ef
--- /dev/null
+++ b/drivers/net/xen-netback/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
+
+xen-netback-y := netback.o xenbus.o interface.o
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
new file mode 100644
index 00000000000..5d7bbf2b2ee
--- /dev/null
+++ b/drivers/net/xen-netback/common.h
@@ -0,0 +1,161 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_NETBACK__COMMON_H__
+#define __XEN_NETBACK__COMMON_H__
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include <xen/interface/io/netif.h>
+#include <xen/interface/grant_table.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+
+struct xen_netbk;
+
+struct xenvif {
+    /* Unique identifier for this interface. */
+    domid_t domid;
+    unsigned int handle;
+
+    /* Reference to netback processing backend. */
+    struct xen_netbk *netbk;
+
+    u8 fe_dev_addr[6];
+
+    /* Physical parameters of the comms window. */
+    grant_handle_t tx_shmem_handle;
+    grant_ref_t tx_shmem_ref;
+    grant_handle_t rx_shmem_handle;
+    grant_ref_t rx_shmem_ref;
+    unsigned int irq;
+
+    /* List of frontends to notify after a batch of frames sent. */
+    struct list_head notify_list;
+
+    /* The shared rings and indexes. */
+    struct xen_netif_tx_back_ring tx;
+    struct xen_netif_rx_back_ring rx;
+    struct vm_struct *tx_comms_area;
+    struct vm_struct *rx_comms_area;
+
+    /* Flags that must not be set in dev->features */
+    u32 features_disabled;
+
+    /* Frontend feature information. */
+    u8 can_sg:1;
+    u8 gso:1;
+    u8 gso_prefix:1;
+    u8 csum:1;
+
+    /* Internal feature information. */
+    u8 can_queue:1; /* can queue packets for receiver? */
+
+    /*
+     * Allow xenvif_start_xmit() to peek ahead in the rx request
+     * ring. This is a prediction of what rx_req_cons will be
+     * once all queued skbs are put on the ring.
+     */
+    RING_IDX rx_req_cons_peek;
+
+    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+    unsigned long credit_bytes;
+    unsigned long credit_usec;
+    unsigned long remaining_credit;
+    struct timer_list credit_timeout;
+
+    /* Statistics */
+    unsigned long rx_gso_checksum_fixup;
+
+    /* Miscellaneous private stuff. */
+    struct list_head schedule_list;
+    atomic_t refcnt;
+    struct net_device *dev;
+
+    wait_queue_head_t waiting_to_free;
+};
+
+#define XEN_NETIF_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
+#define XEN_NETIF_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+
+struct xenvif *xenvif_alloc(struct device *parent,
+                            domid_t domid,
+                            unsigned int handle);
+
+int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+                   unsigned long rx_ring_ref, unsigned int evtchn);
+void xenvif_disconnect(struct xenvif *vif);
+
+void xenvif_get(struct xenvif *vif);
+void xenvif_put(struct xenvif *vif);
+
+int xenvif_xenbus_init(void);
+
+int xenvif_schedulable(struct xenvif *vif);
+
+int xen_netbk_rx_ring_full(struct xenvif *vif);
+
+int xen_netbk_must_stop_queue(struct xenvif *vif);
+
+/* (Un)Map communication rings. */
+void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
+int xen_netbk_map_frontend_rings(struct xenvif *vif,
+                                 grant_ref_t tx_ring_ref,
+                                 grant_ref_t rx_ring_ref);
+
+/* (De)Register a xenvif with the netback backend. */
+void xen_netbk_add_xenvif(struct xenvif *vif);
+void xen_netbk_remove_xenvif(struct xenvif *vif);
+
+/* (De)Schedule backend processing for a xenvif */
+void xen_netbk_schedule_xenvif(struct xenvif *vif);
+void xen_netbk_deschedule_xenvif(struct xenvif *vif);
+
+/* Check for SKBs from frontend and schedule backend processing */
+void xen_netbk_check_rx_xenvif(struct xenvif *vif);
+/* Receive an SKB from the frontend */
+void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
+
+/* Queue an SKB for transmission to the frontend */
+void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+/* Notify xenvif that ring now has space to send an skb to the frontend */
+void xenvif_notify_tx_completion(struct xenvif *vif);
+
+/* Returns number of ring slots required to send an skb to the frontend */
+unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+#endif /* __XEN_NETBACK__COMMON_H__ */
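The credit fields declared above implement per-vif transmit shaping. As a stand-alone illustration (not code from this patch; names and the microsecond clock stand-in are invented for the sketch), the policy behaves like a simple refill-on-window-expiry allowance:

#include <stdbool.h>
#include <stdio.h>

struct credit {
    unsigned long credit_bytes; /* bytes allowed per window */
    unsigned long credit_usec;  /* window length in microseconds */
    unsigned long remaining;    /* allowance left in this window */
    unsigned long window_start; /* when the current window opened */
};

/* Open a new window (and refill) once credit_usec has elapsed. */
static void credit_refill(struct credit *c, unsigned long now)
{
    if (now - c->window_start >= c->credit_usec) {
        c->window_start = now;
        c->remaining = c->credit_bytes;
    }
}

/* Try to spend 'len' bytes of credit; false means "defer the skb". */
static bool credit_consume(struct credit *c, unsigned long len,
                           unsigned long now)
{
    credit_refill(c, now);
    if (len > c->remaining)
        return false;
    c->remaining -= len;
    return true;
}

int main(void)
{
    struct credit c = { 1500, 1000, 1500, 0 };

    printf("%d\n", credit_consume(&c, 1500, 10));  /* 1: spends the window */
    printf("%d\n", credit_consume(&c, 100, 500));  /* 0: window exhausted */
    printf("%d\n", credit_consume(&c, 100, 1200)); /* 1: window refilled */
    return 0;
}

Note that xenvif_alloc() below initialises credit_bytes to ~0UL, i.e. shaping is effectively off until the toolstack configures a rate.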
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
new file mode 100644
index 00000000000..de569cc19da
--- /dev/null
+++ b/drivers/net/xen-netback/interface.c
@@ -0,0 +1,424 @@
+/*
+ * Network-device interface management.
+ *
+ * Copyright (c) 2004-2005, Keir Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "common.h"
+
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+
+#include <xen/events.h>
+#include <asm/xen/hypercall.h>
+
+#define XENVIF_QUEUE_LENGTH 32
+
+void xenvif_get(struct xenvif *vif)
+{
+    atomic_inc(&vif->refcnt);
+}
+
+void xenvif_put(struct xenvif *vif)
+{
+    if (atomic_dec_and_test(&vif->refcnt))
+        wake_up(&vif->waiting_to_free);
+}
+
+int xenvif_schedulable(struct xenvif *vif)
+{
+    return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+}
+
+static int xenvif_rx_schedulable(struct xenvif *vif)
+{
+    return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+}
+
+static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+{
+    struct xenvif *vif = dev_id;
+
+    if (vif->netbk == NULL)
+        return IRQ_NONE;
+
+    xen_netbk_schedule_xenvif(vif);
+
+    if (xenvif_rx_schedulable(vif))
+        netif_wake_queue(vif->dev);
+
+    return IRQ_HANDLED;
+}
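xenvif_rx_schedulable() defers to xen_netbk_rx_ring_full() (defined later in netback.c), whose comparisons such as (req_prod - peek) < needed rely on unsigned modular arithmetic to stay correct when ring indices wrap. A small stand-alone demonstration of that property (illustrative only, not patch code):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ring_idx_t; /* stands in for RING_IDX */

static int ring_full(ring_idx_t req_prod, ring_idx_t peek, ring_idx_t needed)
{
    /* Unsigned subtraction yields the count of outstanding
     * requests even after either counter has overflowed. */
    return (ring_idx_t)(req_prod - peek) < needed;
}

int main(void)
{
    /* Counters just after wrapping: 0x00000002 - 0xfffffffe == 4. */
    printf("%d\n", ring_full(0x00000002u, 0xfffffffeu, 8)); /* 1: full */
    printf("%d\n", ring_full(0x00000002u, 0xfffffffeu, 3)); /* 0: room */
    return 0;
}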
+
+static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+    struct xenvif *vif = netdev_priv(dev);
+
+    BUG_ON(skb->dev != dev);
+
+    if (vif->netbk == NULL)
+        goto drop;
+
+    /* Drop the packet if the target domain has no receive buffers. */
+    if (!xenvif_rx_schedulable(vif))
+        goto drop;
+
+    /* Reserve ring slots for the worst-case number of fragments. */
+    vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
+    xenvif_get(vif);
+
+    if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+        netif_stop_queue(dev);
+
+    xen_netbk_queue_tx_skb(vif, skb);
+
+    return NETDEV_TX_OK;
+
+ drop:
+    vif->dev->stats.tx_dropped++;
+    dev_kfree_skb(skb);
+    return NETDEV_TX_OK;
+}
+
+void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
+{
+    netif_rx_ni(skb);
+}
+
+void xenvif_notify_tx_completion(struct xenvif *vif)
+{
+    if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
+        netif_wake_queue(vif->dev);
+}
+
+static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
+{
+    struct xenvif *vif = netdev_priv(dev);
+    return &vif->dev->stats;
+}
+
+static void xenvif_up(struct xenvif *vif)
+{
+    xen_netbk_add_xenvif(vif);
+    enable_irq(vif->irq);
+    xen_netbk_check_rx_xenvif(vif);
+}
+
+static void xenvif_down(struct xenvif *vif)
+{
+    disable_irq(vif->irq);
+    xen_netbk_deschedule_xenvif(vif);
+    xen_netbk_remove_xenvif(vif);
+}
+
+static int xenvif_open(struct net_device *dev)
+{
+    struct xenvif *vif = netdev_priv(dev);
+    if (netif_carrier_ok(dev))
+        xenvif_up(vif);
+    netif_start_queue(dev);
+    return 0;
+}
+
+static int xenvif_close(struct net_device *dev)
+{
+    struct xenvif *vif = netdev_priv(dev);
+    if (netif_carrier_ok(dev))
+        xenvif_down(vif);
+    netif_stop_queue(dev);
+    return 0;
+}
+
+static int xenvif_change_mtu(struct net_device *dev, int mtu)
+{
+    struct xenvif *vif = netdev_priv(dev);
+    int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;
+
+    if (mtu > max)
+        return -EINVAL;
+    dev->mtu = mtu;
+    return 0;
+}
+
+static void xenvif_set_features(struct xenvif *vif)
+{
+    struct net_device *dev = vif->dev;
+    u32 features = dev->features;
+
+    if (vif->can_sg)
+        features |= NETIF_F_SG;
+    if (vif->gso || vif->gso_prefix)
+        features |= NETIF_F_TSO;
+    if (vif->csum)
+        features |= NETIF_F_IP_CSUM;
+
+    features &= ~(vif->features_disabled);
+
+    if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN)
+        dev->mtu = ETH_DATA_LEN;
+
+    dev->features = features;
+}
+
+static int xenvif_set_tx_csum(struct net_device *dev, u32 data)
+{
+    struct xenvif *vif = netdev_priv(dev);
+    if (data) {
+        if (!vif->csum)
+            return -EOPNOTSUPP;
+        vif->features_disabled &= ~NETIF_F_IP_CSUM;
+    } else {
+        vif->features_disabled |= NETIF_F_IP_CSUM;
+    }
+
+    xenvif_set_features(vif);
+    return 0;
+}
+
+static int xenvif_set_sg(struct net_device *dev, u32 data)
+{
+    struct xenvif *vif = netdev_priv(dev);
+    if (data) {
+        if (!vif->can_sg)
+            return -EOPNOTSUPP;
+        vif->features_disabled &= ~NETIF_F_SG;
+    } else {
+        vif->features_disabled |= NETIF_F_SG;
+    }
+
+    xenvif_set_features(vif);
+    return 0;
+}
+
+static int xenvif_set_tso(struct net_device *dev, u32 data)
+{
+    struct xenvif *vif = netdev_priv(dev);
+    if (data) {
+        if (!vif->gso && !vif->gso_prefix)
+            return -EOPNOTSUPP;
+        vif->features_disabled &= ~NETIF_F_TSO;
+    } else {
+        vif->features_disabled |= NETIF_F_TSO;
+    }
+
+    xenvif_set_features(vif);
+    return 0;
+}
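xenvif_set_features() recomputes dev->features from the frontend's advertised capabilities minus the features_disabled mask that the ethtool set_* callbacks above maintain. The pattern in isolation (a toy model with invented flag values, not patch code):

#include <stdint.h>
#include <stdio.h>

#define F_SG   (1u << 0)
#define F_TSO  (1u << 1)
#define F_CSUM (1u << 2)

static uint32_t compute_features(int can_sg, int gso, int csum,
                                 uint32_t disabled)
{
    uint32_t features = 0;

    /* Start from what the frontend can do... */
    if (can_sg)
        features |= F_SG;
    if (gso)
        features |= F_TSO;
    if (csum)
        features |= F_CSUM;

    /* ...then subtract administrator overrides. */
    return features & ~disabled;
}

int main(void)
{
    /* Fully capable frontend, but TSO switched off via "ethtool". */
    printf("%#x\n", compute_features(1, 1, 1, F_TSO)); /* 0x5 */
    return 0;
}

Keeping the two inputs separate means re-negotiating with the frontend (xenvif_connect() calls xenvif_set_features() again) cannot silently re-enable a feature the administrator disabled.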
+
+static const struct xenvif_stat {
+    char name[ETH_GSTRING_LEN];
+    u16 offset;
+} xenvif_stats[] = {
+    {
+        "rx_gso_checksum_fixup",
+        offsetof(struct xenvif, rx_gso_checksum_fixup)
+    },
+};
+
+static int xenvif_get_sset_count(struct net_device *dev, int string_set)
+{
+    switch (string_set) {
+    case ETH_SS_STATS:
+        return ARRAY_SIZE(xenvif_stats);
+    default:
+        return -EINVAL;
+    }
+}
+
+static void xenvif_get_ethtool_stats(struct net_device *dev,
+                                     struct ethtool_stats *stats, u64 *data)
+{
+    void *vif = netdev_priv(dev);
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
+        data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
+}
+
+static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+    int i;
+
+    switch (stringset) {
+    case ETH_SS_STATS:
+        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
+            memcpy(data + i * ETH_GSTRING_LEN,
+                   xenvif_stats[i].name, ETH_GSTRING_LEN);
+        break;
+    }
+}
+
+static struct ethtool_ops xenvif_ethtool_ops = {
+    .get_tx_csum = ethtool_op_get_tx_csum,
+    .set_tx_csum = xenvif_set_tx_csum,
+    .get_sg      = ethtool_op_get_sg,
+    .set_sg      = xenvif_set_sg,
+    .get_tso     = ethtool_op_get_tso,
+    .set_tso     = xenvif_set_tso,
+    .get_link    = ethtool_op_get_link,
+
+    .get_sset_count    = xenvif_get_sset_count,
+    .get_ethtool_stats = xenvif_get_ethtool_stats,
+    .get_strings       = xenvif_get_strings,
+};
+
+static struct net_device_ops xenvif_netdev_ops = {
+    .ndo_start_xmit = xenvif_start_xmit,
+    .ndo_get_stats  = xenvif_get_stats,
+    .ndo_open       = xenvif_open,
+    .ndo_stop       = xenvif_close,
+    .ndo_change_mtu = xenvif_change_mtu,
+};
+
+struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+                            unsigned int handle)
+{
+    int err;
+    struct net_device *dev;
+    struct xenvif *vif;
+    char name[IFNAMSIZ] = {};
+
+    snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+    dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
+    if (dev == NULL) {
+        pr_warn("Could not allocate netdev\n");
+        return ERR_PTR(-ENOMEM);
+    }
+
+    SET_NETDEV_DEV(dev, parent);
+
+    vif = netdev_priv(dev);
+    vif->domid = domid;
+    vif->handle = handle;
+    vif->netbk = NULL;
+    vif->can_sg = 1;
+    vif->csum = 1;
+    atomic_set(&vif->refcnt, 1);
+    init_waitqueue_head(&vif->waiting_to_free);
+    vif->dev = dev;
+    INIT_LIST_HEAD(&vif->schedule_list);
+    INIT_LIST_HEAD(&vif->notify_list);
+
+    vif->credit_bytes = vif->remaining_credit = ~0UL;
+    vif->credit_usec = 0UL;
+    init_timer(&vif->credit_timeout);
+    /* Initialize 'expires' now: it's used to track the credit window. */
+    vif->credit_timeout.expires = jiffies;
+
+    dev->netdev_ops = &xenvif_netdev_ops;
+    xenvif_set_features(vif);
+    SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+
+    dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
+
+    /*
+     * Initialise a dummy MAC address. We choose the numerically
+     * largest non-broadcast address to prevent the address getting
+     * stolen by an Ethernet bridge for STP purposes.
+     * (FE:FF:FF:FF:FF:FF)
+     */
+    memset(dev->dev_addr, 0xFF, ETH_ALEN);
+    dev->dev_addr[0] &= ~0x01;
+
+    netif_carrier_off(dev);
+
+    err = register_netdev(dev);
+    if (err) {
+        netdev_warn(dev, "Could not register device: err=%d\n", err);
+        free_netdev(dev);
+        return ERR_PTR(err);
+    }
+
+    netdev_dbg(dev, "Successfully created xenvif\n");
+    return vif;
+}
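The dummy MAC construction above works because bit 0 of the first octet is the IEEE 802 group (multicast/broadcast) bit: all-ones would be the broadcast address, and clearing that one bit yields FE:FF:FF:FF:FF:FF, the largest unicast address. A quick stand-alone check (illustrative, not patch code):

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char addr[6];

    memset(addr, 0xFF, sizeof(addr));
    addr[0] &= ~0x01; /* clear the group bit -> unicast */

    printf("%02X:%02X:%02X:%02X:%02X:%02X multicast=%d\n",
           addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
           addr[0] & 0x01);
    return 0;
}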
+
+int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+                   unsigned long rx_ring_ref, unsigned int evtchn)
+{
+    int err = -ENOMEM;
+
+    /* Already connected through? */
+    if (vif->irq)
+        return 0;
+
+    xenvif_set_features(vif);
+
+    err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+    if (err < 0)
+        goto err;
+
+    err = bind_interdomain_evtchn_to_irqhandler(
+        vif->domid, evtchn, xenvif_interrupt, 0,
+        vif->dev->name, vif);
+    if (err < 0)
+        goto err_unmap;
+    vif->irq = err;
+    disable_irq(vif->irq);
+
+    xenvif_get(vif);
+
+    rtnl_lock();
+    netif_carrier_on(vif->dev);
+    if (netif_running(vif->dev))
+        xenvif_up(vif);
+    rtnl_unlock();
+
+    return 0;
+err_unmap:
+    xen_netbk_unmap_frontend_rings(vif);
+err:
+    return err;
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+    struct net_device *dev = vif->dev;
+    if (netif_carrier_ok(dev)) {
+        rtnl_lock();
+        netif_carrier_off(dev); /* discard queued packets */
+        if (netif_running(dev))
+            xenvif_down(vif);
+        rtnl_unlock();
+        xenvif_put(vif);
+    }
+
+    atomic_dec(&vif->refcnt);
+    wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+
+    del_timer_sync(&vif->credit_timeout);
+
+    if (vif->irq)
+        unbind_from_irqhandler(vif->irq, vif);
+
+    unregister_netdev(vif->dev);
+
+    xen_netbk_unmap_frontend_rings(vif);
+
+    free_netdev(vif->dev);
+}
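xenvif_disconnect() drops its own reference and then sleeps until every outstanding xenvif_get() has been balanced by a xenvif_put(), which wakes waiting_to_free on the final decrement. The same "last reference wakes the destroyer" shape in a minimal user-space model (invented names, not patch code; compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>

struct obj {
    atomic_int refcnt;
    pthread_mutex_t lock;
    pthread_cond_t freed;
};

/* Equivalent of xenvif_put(): dropping the last reference wakes the waiter. */
static void obj_put(struct obj *o)
{
    if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
        pthread_mutex_lock(&o->lock);
        pthread_cond_signal(&o->freed);
        pthread_mutex_unlock(&o->lock);
    }
}

/* Equivalent of the teardown in xenvif_disconnect(). */
static void obj_destroy(struct obj *o)
{
    obj_put(o); /* drop the reference we were created with */

    pthread_mutex_lock(&o->lock);
    while (atomic_load(&o->refcnt) != 0)
        pthread_cond_wait(&o->freed, &o->lock);
    pthread_mutex_unlock(&o->lock);
    /* now safe to free o's resources */
}

int main(void)
{
    struct obj o = {
        .refcnt = 2, /* creator + one concurrent user */
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .freed = PTHREAD_COND_INITIALIZER,
    };

    obj_put(&o);     /* the concurrent user finishes first... */
    obj_destroy(&o); /* ...so the destroyer does not block */
    return 0;
}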
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
new file mode 100644
index 00000000000..0e4851b8a77
--- /dev/null
+++ b/drivers/net/xen-netback/netback.c
@@ -0,0 +1,1745 @@
+/*
+ * Back-end of the driver for virtual network devices. This portion of the
+ * driver exports a 'unified' network-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A
+ * reference front-end implementation can be found in:
+ *  drivers/net/xen-netfront.c
+ *
+ * Copyright (c) 2002-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "common.h"
+
+#include <linux/kthread.h>
+#include <linux/if_vlan.h>
+#include <linux/udp.h>
+
+#include <net/tcp.h>
+
+#include <xen/events.h>
+#include <xen/interface/memory.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+
+struct pending_tx_info {
+    struct xen_netif_tx_request req;
+    struct xenvif *vif;
+};
+typedef unsigned int pending_ring_idx_t;
+
+struct netbk_rx_meta {
+    int id;
+    int size;
+    int gso_size;
+};
+
+#define MAX_PENDING_REQS 256
+
+#define MAX_BUFFER_OFFSET PAGE_SIZE
+
+/* extra field used in struct page */
+union page_ext {
+    struct {
+#if BITS_PER_LONG < 64
+#define IDX_WIDTH   8
+#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
+        unsigned int group:GROUP_WIDTH;
+        unsigned int idx:IDX_WIDTH;
+#else
+        unsigned int group, idx;
+#endif
+    } e;
+    void *mapping;
+};
+
+struct xen_netbk {
+    wait_queue_head_t wq;
+    struct task_struct *task;
+
+    struct sk_buff_head rx_queue;
+    struct sk_buff_head tx_queue;
+
+    struct timer_list net_timer;
+
+    struct page *mmap_pages[MAX_PENDING_REQS];
+
+    pending_ring_idx_t pending_prod;
+    pending_ring_idx_t pending_cons;
+    struct list_head net_schedule_list;
+
+    /* Protect the net_schedule_list in netif. */
+    spinlock_t net_schedule_list_lock;
+
+    atomic_t netfront_count;
+
+    struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+    struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+
+    u16 pending_ring[MAX_PENDING_REQS];
+
+    /*
+     * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
+     * head/fragment page uses 2 copy operations because it
+     * straddles two buffers in the frontend.
+     */
+    struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
+    struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+};
+
+static struct xen_netbk *xen_netbk;
+static int xen_netbk_group_nr;
+
+void xen_netbk_add_xenvif(struct xenvif *vif)
+{
+    int i;
+    int min_netfront_count;
+    int min_group = 0;
+    struct xen_netbk *netbk;
+
+    min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
+    for (i = 0; i < xen_netbk_group_nr; i++) {
+        int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
+        if (netfront_count < min_netfront_count) {
+            min_group = i;
+            min_netfront_count = netfront_count;
+        }
+    }
+
+    netbk = &xen_netbk[min_group];
+
+    vif->netbk = netbk;
+    atomic_inc(&netbk->netfront_count);
+}
+
+void xen_netbk_remove_xenvif(struct xenvif *vif)
+{
+    struct xen_netbk *netbk = vif->netbk;
+    vif->netbk = NULL;
+    atomic_dec(&netbk->netfront_count);
+}
+
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void make_tx_response(struct xenvif *vif,
+                             struct xen_netif_tx_request *txp,
+                             s8 st);
+static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+                                                      u16 id,
+                                                      s8 st,
+                                                      u16 offset,
+                                                      u16 size,
+                                                      u16 flags);
+
+static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
+                                       unsigned int idx)
+{
+    return page_to_pfn(netbk->mmap_pages[idx]);
+}
+
+static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
+                                         unsigned int idx)
+{
+    return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
+}
+
+/* extra field used in struct page */
+static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
+                                unsigned int idx)
+{
+    unsigned int group = netbk - xen_netbk;
+    union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
+
+    BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
+    pg->mapping = ext.mapping;
+}
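set_page_ext() stashes (group + 1, idx) in the page's otherwise-unused mapping field via union page_ext; get_page_ext(), next, decodes and validates it. The "+ 1" keeps an untouched (NULL) mapping distinct from a real group 0. A user-space round-trip of the same packing (illustrative, not patch code; UINTPTR_MAX stands in for the kernel's BITS_PER_LONG test):

#include <assert.h>
#include <stdint.h>

#if UINTPTR_MAX == 0xffffffffu /* 32-bit: squeeze both fields into one word */
#define IDX_WIDTH   8
#define GROUP_WIDTH (32 - IDX_WIDTH)
union page_ext {
    struct {
        unsigned int group:GROUP_WIDTH;
        unsigned int idx:IDX_WIDTH;
    } e;
    void *mapping;
};
#else /* 64-bit: two full ints fit in a pointer */
union page_ext {
    struct {
        unsigned int group, idx;
    } e;
    void *mapping;
};
#endif

int main(void)
{
    union page_ext in = { .e = { .group = 3 + 1, .idx = 42 } };
    union page_ext out = { .mapping = in.mapping };

    /* mirrors the BUILD_BUG_ON above */
    assert(sizeof(in.e) <= sizeof(void *));
    assert(out.e.group - 1 == 3 && out.e.idx == 42);
    return 0;
}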
+
+static int get_page_ext(struct page *pg,
+                        unsigned int *pgroup, unsigned int *pidx)
+{
+    union page_ext ext = { .mapping = pg->mapping };
+    struct xen_netbk *netbk;
+    unsigned int group, idx;
+
+    group = ext.e.group - 1;
+
+    if (group < 0 || group >= xen_netbk_group_nr)
+        return 0;
+
+    netbk = &xen_netbk[group];
+
+    idx = ext.e.idx;
+
+    if ((idx < 0) || (idx >= MAX_PENDING_REQS))
+        return 0;
+
+    if (netbk->mmap_pages[idx] != pg)
+        return 0;
+
+    *pgroup = group;
+    *pidx = idx;
+
+    return 1;
+}
+
+/*
+ * This is the amount of packet we copy rather than map, so that the
+ * guest can't fiddle with the contents of the headers while we do
+ * packet processing on them (netfilter, routing, etc).
+ */
+#define PKT_PROT_LEN    (ETH_HLEN + \
+                         VLAN_HLEN + \
+                         sizeof(struct iphdr) + MAX_IPOPTLEN + \
+                         sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+
+static inline pending_ring_idx_t pending_index(unsigned i)
+{
+    return i & (MAX_PENDING_REQS-1);
+}
+
+static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
+{
+    return MAX_PENDING_REQS -
+        netbk->pending_prod + netbk->pending_cons;
+}
+
+static void xen_netbk_kick_thread(struct xen_netbk *netbk)
+{
+    wake_up(&netbk->wq);
+}
+
+static int max_required_rx_slots(struct xenvif *vif)
+{
+    int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+
+    if (vif->can_sg || vif->gso || vif->gso_prefix)
+        max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
+
+    return max;
+}
+
+int xen_netbk_rx_ring_full(struct xenvif *vif)
+{
+    RING_IDX peek   = vif->rx_req_cons_peek;
+    RING_IDX needed = max_required_rx_slots(vif);
+
+    return ((vif->rx.sring->req_prod - peek) < needed) ||
+           ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
+}
+
+int xen_netbk_must_stop_queue(struct xenvif *vif)
+{
+    if (!xen_netbk_rx_ring_full(vif))
+        return 0;
+
+    vif->rx.sring->req_event = vif->rx_req_cons_peek +
+        max_required_rx_slots(vif);
+    mb(); /* request notification /then/ check the queue */
+
+    return xen_netbk_rx_ring_full(vif);
+}
+
+/*
+ * Returns true if we should start a new receive buffer instead of
+ * adding 'size' bytes to a buffer which currently contains 'offset'
+ * bytes.
+ */
+static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+{
+    /* simple case: we have completely filled the current buffer. */
+    if (offset == MAX_BUFFER_OFFSET)
+        return true;
+
+    /*
+     * complex case: start a fresh buffer if the current frag
+     * would overflow the current buffer but only if:
+     *     (i) this frag would fit completely in the next buffer
+     * and (ii) there is already some data in the current buffer
+     * and (iii) this is not the head buffer.
+     *
+     * Where:
+     * - (i) stops us splitting a frag into two copies
+     *   unless the frag is too large for a single buffer.
+     * - (ii) stops us from leaving a buffer pointlessly empty.
+     * - (iii) stops us leaving the first buffer
+     *   empty. Strictly speaking this is already covered
+     *   by (ii) but is explicitly checked because
+     *   netfront relies on the first buffer being
+     *   non-empty and can crash otherwise.
+     *
+     * This means we will effectively linearise small frags but
+     * do not needlessly split large buffers into multiple
+     * copies; large frags tend to get their own buffers as
+     * before.
+     */
+    if ((offset + size > MAX_BUFFER_OFFSET) &&
+        (size <= MAX_BUFFER_OFFSET) && offset && !head)
+        return true;
+
+    return false;
+}
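The three rules in start_new_rx_buffer()'s comment are easy to exercise in isolation. A small stand-alone harness (not patch code; MAX_BUFFER_OFFSET mirrors the driver's PAGE_SIZE of 4096 on x86):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_OFFSET 4096

static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
    /* Current buffer is completely full. */
    if (offset == MAX_BUFFER_OFFSET)
        return true;

    /* Fresh buffer only if the frag fits whole in the next buffer,
     * the current buffer already has data, and this is not the
     * head buffer (same conditions (i)-(iii) as above). */
    if ((offset + size > MAX_BUFFER_OFFSET) &&
        (size <= MAX_BUFFER_OFFSET) && offset && !head)
        return true;

    return false;
}

int main(void)
{
    printf("%d\n", start_new_rx_buffer(4096, 100, 0)); /* 1: buffer full */
    printf("%d\n", start_new_rx_buffer(4000, 200, 0)); /* 1: avoid a split */
    printf("%d\n", start_new_rx_buffer(4000, 200, 1)); /* 0: head must fill */
    printf("%d\n", start_new_rx_buffer(0, 8192, 0));   /* 0: must split anyway */
    return 0;
}

xen_netbk_count_skb_slots(), next, is a dry run of this decision procedure, so the two must agree exactly or the slot reservation in xenvif_start_xmit() would be wrong.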
+ */ +unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) +{ + unsigned int count; + int i, copy_off; + + count = DIV_ROUND_UP( + offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE); + + copy_off = skb_headlen(skb) % PAGE_SIZE; + + if (skb_shinfo(skb)->gso_size) + count++; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + unsigned long size = skb_shinfo(skb)->frags[i].size; + unsigned long bytes; + while (size > 0) { + BUG_ON(copy_off > MAX_BUFFER_OFFSET); + + if (start_new_rx_buffer(copy_off, size, 0)) { + count++; + copy_off = 0; + } + + bytes = size; + if (copy_off + bytes > MAX_BUFFER_OFFSET) + bytes = MAX_BUFFER_OFFSET - copy_off; + + copy_off += bytes; + size -= bytes; + } + } + return count; +} + +struct netrx_pending_operations { + unsigned copy_prod, copy_cons; + unsigned meta_prod, meta_cons; + struct gnttab_copy *copy; + struct netbk_rx_meta *meta; + int copy_off; + grant_ref_t copy_gref; +}; + +static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif, + struct netrx_pending_operations *npo) +{ + struct netbk_rx_meta *meta; + struct xen_netif_rx_request *req; + + req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); + + meta = npo->meta + npo->meta_prod++; + meta->gso_size = 0; + meta->size = 0; + meta->id = req->id; + + npo->copy_off = 0; + npo->copy_gref = req->gref; + + return meta; +} + +/* + * Set up the grant operations for this fragment. If it's a flipping + * interface, we also set up the unmap request from here. + */ +static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, + struct netrx_pending_operations *npo, + struct page *page, unsigned long size, + unsigned long offset, int *head) +{ + struct gnttab_copy *copy_gop; + struct netbk_rx_meta *meta; + /* + * These variables a used iff get_page_ext returns true, + * in which case they are guaranteed to be initialized. + */ + unsigned int uninitialized_var(group), uninitialized_var(idx); + int foreign = get_page_ext(page, &group, &idx); + unsigned long bytes; + + /* Data must not cross a page boundary. */ + BUG_ON(size + offset > PAGE_SIZE); + + meta = npo->meta + npo->meta_prod - 1; + + while (size > 0) { + BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); + + if (start_new_rx_buffer(npo->copy_off, size, *head)) { + /* + * Netfront requires there to be some data in the head + * buffer. + */ + BUG_ON(*head); + + meta = get_next_rx_buffer(vif, npo); + } + + bytes = size; + if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) + bytes = MAX_BUFFER_OFFSET - npo->copy_off; + + copy_gop = npo->copy + npo->copy_prod++; + copy_gop->flags = GNTCOPY_dest_gref; + if (foreign) { + struct xen_netbk *netbk = &xen_netbk[group]; + struct pending_tx_info *src_pend; + + src_pend = &netbk->pending_tx_info[idx]; + + copy_gop->source.domid = src_pend->vif->domid; + copy_gop->source.u.ref = src_pend->req.gref; + copy_gop->flags |= GNTCOPY_source_gref; + } else { + void *vaddr = page_address(page); + copy_gop->source.domid = DOMID_SELF; + copy_gop->source.u.gmfn = virt_to_mfn(vaddr); + } + copy_gop->source.offset = offset; + copy_gop->dest.domid = vif->domid; + + copy_gop->dest.offset = npo->copy_off; + copy_gop->dest.u.ref = npo->copy_gref; + copy_gop->len = bytes; + + npo->copy_off += bytes; + meta->size += bytes; + + offset += bytes; + size -= bytes; |