Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/Makefile    |    3
-rw-r--r--  drivers/net/xen-netback/common.h    |  306
-rw-r--r--  drivers/net/xen-netback/interface.c |  734
-rw-r--r--  drivers/net/xen-netback/netback.c   | 2045
-rw-r--r--  drivers/net/xen-netback/xenbus.c    |  786
5 files changed, 3874 insertions, 0 deletions
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile new file mode 100644 index 00000000000..e346e8125ef --- /dev/null +++ b/drivers/net/xen-netback/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o + +xen-netback-y := netback.o xenbus.o interface.o diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h new file mode 100644 index 00000000000..2532ce85d71 --- /dev/null +++ b/drivers/net/xen-netback/common.h @@ -0,0 +1,306 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef __XEN_NETBACK__COMMON_H__ +#define __XEN_NETBACK__COMMON_H__ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/wait.h> +#include <linux/sched.h> + +#include <xen/interface/io/netif.h> +#include <xen/interface/grant_table.h> +#include <xen/grant_table.h> +#include <xen/xenbus.h> + +typedef unsigned int pending_ring_idx_t; +#define INVALID_PENDING_RING_IDX (~0U) + +struct pending_tx_info { +	struct xen_netif_tx_request req; /* tx request */ +	/* Callback data for released SKBs. The callback is always +	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is +	 * also an index in pending_tx_info array. It is initialized in +	 * xenvif_alloc and it never changes. +	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's +	 * callback_struct in this array of struct pending_tx_info's, then ctx +	 * to the next, or NULL if there is no more slot for this skb. +	 * ubuf_to_vif is a helper which finds the struct xenvif from a pointer +	 * to this field. +	 */ +	struct ubuf_info callback_struct; +}; + +#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) +#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) + +struct xenvif_rx_meta { +	int id; +	int size; +	int gso_type; +	int gso_size; +}; + +#define GSO_BIT(type) \ +	(1 << XEN_NETIF_GSO_TYPE_ ## type) + +/* Discriminate from any valid pending_idx value. 
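+ * Valid pending_idx values are bounded by MAX_PENDING_REQS (the TX ring
+ * size, 256 with 4 KiB pages), so 0xFFFF can never alias a real slot.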
*/ +#define INVALID_PENDING_IDX 0xFFFF + +#define MAX_BUFFER_OFFSET PAGE_SIZE + +#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE + +/* It's possible for an skb to have a maximal number of frags + * but still be less than MAX_BUFFER_OFFSET in size. Thus the + * worst-case number of copy operations is MAX_SKB_FRAGS per + * ring slot. + */ +#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) + +#define NETBACK_INVALID_HANDLE -1 + +/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating + * the maximum slots a valid packet can use. Now this value is defined + * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by + * all backend. + */ +#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN + +/* Queue name is interface name with "-qNNN" appended */ +#define QUEUE_NAME_SIZE (IFNAMSIZ + 5) + +/* IRQ name is queue name with "-tx" or "-rx" appended */ +#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) + +struct xenvif; + +struct xenvif_stats { +	/* Stats fields to be updated per-queue. +	 * A subset of struct net_device_stats that contains only the +	 * fields that are updated in netback.c for each queue. +	 */ +	unsigned int rx_bytes; +	unsigned int rx_packets; +	unsigned int tx_bytes; +	unsigned int tx_packets; + +	/* Additional stats used by xenvif */ +	unsigned long rx_gso_checksum_fixup; +	unsigned long tx_zerocopy_sent; +	unsigned long tx_zerocopy_success; +	unsigned long tx_zerocopy_fail; +	unsigned long tx_frag_overflow; +}; + +struct xenvif_queue { /* Per-queue data for xenvif */ +	unsigned int id; /* Queue ID, 0-based */ +	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ +	struct xenvif *vif; /* Parent VIF */ + +	/* Use NAPI for guest TX */ +	struct napi_struct napi; +	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */ +	unsigned int tx_irq; +	/* Only used when feature-split-event-channels = 1 */ +	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ +	struct xen_netif_tx_back_ring tx; +	struct sk_buff_head tx_queue; +	struct page *mmap_pages[MAX_PENDING_REQS]; +	pending_ring_idx_t pending_prod; +	pending_ring_idx_t pending_cons; +	u16 pending_ring[MAX_PENDING_REQS]; +	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; +	grant_handle_t grant_tx_handle[MAX_PENDING_REQS]; + +	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS]; +	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS]; +	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS]; +	/* passed to gnttab_[un]map_refs with pages under (un)mapping */ +	struct page *pages_to_map[MAX_PENDING_REQS]; +	struct page *pages_to_unmap[MAX_PENDING_REQS]; + +	/* This prevents zerocopy callbacks  to race over dealloc_ring */ +	spinlock_t callback_lock; +	/* This prevents dealloc thread and NAPI instance to race over response +	 * creation and pending_ring in xenvif_idx_release. In xenvif_tx_err +	 * it only protect response creation +	 */ +	spinlock_t response_lock; +	pending_ring_idx_t dealloc_prod; +	pending_ring_idx_t dealloc_cons; +	u16 dealloc_ring[MAX_PENDING_REQS]; +	struct task_struct *dealloc_task; +	wait_queue_head_t dealloc_wq; + +	/* Use kthread for guest RX */ +	struct task_struct *task; +	wait_queue_head_t wq; +	/* When feature-split-event-channels = 0, tx_irq = rx_irq. 
*/ +	unsigned int rx_irq; +	/* Only used when feature-split-event-channels = 1 */ +	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ +	struct xen_netif_rx_back_ring rx; +	struct sk_buff_head rx_queue; +	RING_IDX rx_last_skb_slots; +	bool rx_queue_purge; + +	struct timer_list wake_queue; + +	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS]; + +	/* We create one meta structure per ring request we consume, so +	 * the maximum number is the same as the ring size. +	 */ +	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; + +	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ +	unsigned long   credit_bytes; +	unsigned long   credit_usec; +	unsigned long   remaining_credit; +	struct timer_list credit_timeout; +	u64 credit_window_start; + +	/* Statistics */ +	struct xenvif_stats stats; +}; + +struct xenvif { +	/* Unique identifier for this interface. */ +	domid_t          domid; +	unsigned int     handle; + +	u8               fe_dev_addr[6]; + +	/* Frontend feature information. */ +	int gso_mask; +	int gso_prefix_mask; + +	u8 can_sg:1; +	u8 ip_csum:1; +	u8 ipv6_csum:1; + +	/* Internal feature information. */ +	u8 can_queue:1;	    /* can queue packets for receiver? */ + +	/* Is this interface disabled? True when backend discovers +	 * frontend is rogue. +	 */ +	bool disabled; + +	/* Queues */ +	struct xenvif_queue *queues; +	unsigned int num_queues; /* active queues, resource allocated */ + +	/* Miscellaneous private stuff. */ +	struct net_device *dev; +}; + +static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif) +{ +	return to_xenbus_device(vif->dev->dev.parent); +} + +struct xenvif *xenvif_alloc(struct device *parent, +			    domid_t domid, +			    unsigned int handle); + +int xenvif_init_queue(struct xenvif_queue *queue); +void xenvif_deinit_queue(struct xenvif_queue *queue); + +int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, +		   unsigned long rx_ring_ref, unsigned int tx_evtchn, +		   unsigned int rx_evtchn); +void xenvif_disconnect(struct xenvif *vif); +void xenvif_free(struct xenvif *vif); + +int xenvif_xenbus_init(void); +void xenvif_xenbus_fini(void); + +int xenvif_schedulable(struct xenvif *vif); + +int xenvif_must_stop_queue(struct xenvif_queue *queue); + +int xenvif_queue_stopped(struct xenvif_queue *queue); +void xenvif_wake_queue(struct xenvif_queue *queue); + +/* (Un)Map communication rings. */ +void xenvif_unmap_frontend_rings(struct xenvif_queue *queue); +int xenvif_map_frontend_rings(struct xenvif_queue *queue, +			      grant_ref_t tx_ring_ref, +			      grant_ref_t rx_ring_ref); + +/* Check for SKBs from frontend and schedule backend processing */ +void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue); + +/* Prevent the device from generating any further traffic. */ +void xenvif_carrier_off(struct xenvif *vif); + +int xenvif_tx_action(struct xenvif_queue *queue, int budget); + +int xenvif_kthread_guest_rx(void *data); +void xenvif_kick_thread(struct xenvif_queue *queue); + +int xenvif_dealloc_kthread(void *data); + +/* Determine whether the needed number of slots (req) are available, + * and set req_event if not. 
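+ * Callers (xenvif_start_xmit() and the RX kthread) are expected to back
+ * off when this returns false and rely on the req_event written here to
+ * receive an interrupt once the frontend posts more requests.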
+ */ +bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed); + +void xenvif_carrier_on(struct xenvif *vif); + +/* Callback from stack when TX packet can be released */ +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); + +/* Unmap a pending page and release it back to the guest */ +void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx); + +static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) +{ +	return MAX_PENDING_REQS - +		queue->pending_prod + queue->pending_cons; +} + +/* Callback from stack when TX packet can be released */ +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); + +extern bool separate_tx_rx_irq; + +extern unsigned int rx_drain_timeout_msecs; +extern unsigned int rx_drain_timeout_jiffies; +extern unsigned int xenvif_max_queues; + +#endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c new file mode 100644 index 00000000000..9e97c7ca0dd --- /dev/null +++ b/drivers/net/xen-netback/interface.c @@ -0,0 +1,734 @@ +/* + * Network-device interface management. + * + * Copyright (c) 2004-2005, Keir Fraser + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "common.h" + +#include <linux/kthread.h> +#include <linux/ethtool.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> +#include <linux/vmalloc.h> + +#include <xen/events.h> +#include <asm/xen/hypercall.h> +#include <xen/balloon.h> + +#define XENVIF_QUEUE_LENGTH 32 +#define XENVIF_NAPI_WEIGHT  64 + +static inline void xenvif_stop_queue(struct xenvif_queue *queue) +{ +	struct net_device *dev = queue->vif->dev; + +	if (!queue->vif->can_queue) +		return; + +	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); +} + +int xenvif_schedulable(struct xenvif *vif) +{ +	return netif_running(vif->dev) && netif_carrier_ok(vif->dev); +} + +static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) +{ +	struct xenvif_queue *queue = dev_id; + +	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) +		napi_schedule(&queue->napi); + +	return IRQ_HANDLED; +} + +int xenvif_poll(struct napi_struct *napi, int budget) +{ +	struct xenvif_queue *queue = +		container_of(napi, struct xenvif_queue, napi); +	int work_done; + +	/* This vif is rogue, we pretend we've there is nothing to do +	 * for this vif to deschedule it from NAPI. But this interface +	 * will be turned off in thread context later. +	 */ +	if (unlikely(queue->vif->disabled)) { +		napi_complete(napi); +		return 0; +	} + +	work_done = xenvif_tx_action(queue, budget); + +	if (work_done < budget) { +		napi_complete(napi); +		xenvif_napi_schedule_or_enable_events(queue); +	} + +	return work_done; +} + +static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) +{ +	struct xenvif_queue *queue = dev_id; + +	xenvif_kick_thread(queue); + +	return IRQ_HANDLED; +} + +static irqreturn_t xenvif_interrupt(int irq, void *dev_id) +{ +	xenvif_tx_interrupt(irq, dev_id); +	xenvif_rx_interrupt(irq, dev_id); + +	return IRQ_HANDLED; +} + +int xenvif_queue_stopped(struct xenvif_queue *queue) +{ +	struct net_device *dev = queue->vif->dev; +	unsigned int id = queue->id; +	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id)); +} + +void xenvif_wake_queue(struct xenvif_queue *queue) +{ +	struct net_device *dev = queue->vif->dev; +	unsigned int id = queue->id; +	netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); +} + +/* Callback to wake the queue and drain it on timeout */ +static void xenvif_wake_queue_callback(unsigned long data) +{ +	struct xenvif_queue *queue = (struct xenvif_queue *)data; + +	if (xenvif_queue_stopped(queue)) { +		netdev_err(queue->vif->dev, "draining TX queue\n"); +		queue->rx_queue_purge = true; +		xenvif_kick_thread(queue); +		xenvif_wake_queue(queue); +	} +} + +static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ +	struct xenvif *vif = netdev_priv(dev); +	struct xenvif_queue *queue = NULL; +	unsigned int num_queues = vif->num_queues; +	u16 index; +	int min_slots_needed; + +	BUG_ON(skb->dev != dev); + +	/* Drop the packet if queues are not set up */ +	if (num_queues < 1) +		goto drop; + +	/* Obtain the queue to be used to transmit this packet */ +	index = skb_get_queue_mapping(skb); +	if (index >= num_queues) { +		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.", +				    index, vif->dev->name); +		index %= num_queues; +	} +	queue = &vif->queues[index]; + +	/* Drop the packet if queue is not ready */ +	if (queue->task == NULL || +	    queue->dealloc_task == NULL || +	    !xenvif_schedulable(vif)) +		goto drop; + +	/* At best we'll need one slot for the header and one for each +	 * frag. 
+	 */ +	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags; + +	/* If the skb is GSO then we'll also need an extra slot for the +	 * metadata. +	 */ +	if (skb_is_gso(skb)) +		min_slots_needed++; + +	/* If the skb can't possibly fit in the remaining slots +	 * then turn off the queue to give the ring a chance to +	 * drain. +	 */ +	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) { +		queue->wake_queue.function = xenvif_wake_queue_callback; +		queue->wake_queue.data = (unsigned long)queue; +		xenvif_stop_queue(queue); +		mod_timer(&queue->wake_queue, +			jiffies + rx_drain_timeout_jiffies); +	} + +	skb_queue_tail(&queue->rx_queue, skb); +	xenvif_kick_thread(queue); + +	return NETDEV_TX_OK; + + drop: +	vif->dev->stats.tx_dropped++; +	dev_kfree_skb(skb); +	return NETDEV_TX_OK; +} + +static struct net_device_stats *xenvif_get_stats(struct net_device *dev) +{ +	struct xenvif *vif = netdev_priv(dev); +	struct xenvif_queue *queue = NULL; +	unsigned int num_queues = vif->num_queues; +	unsigned long rx_bytes = 0; +	unsigned long rx_packets = 0; +	unsigned long tx_bytes = 0; +	unsigned long tx_packets = 0; +	unsigned int index; + +	if (vif->queues == NULL) +		goto out; + +	/* Aggregate tx and rx stats from each queue */ +	for (index = 0; index < num_queues; ++index) { +		queue = &vif->queues[index]; +		rx_bytes += queue->stats.rx_bytes; +		rx_packets += queue->stats.rx_packets; +		tx_bytes += queue->stats.tx_bytes; +		tx_packets += queue->stats.tx_packets; +	} + +out: +	vif->dev->stats.rx_bytes = rx_bytes; +	vif->dev->stats.rx_packets = rx_packets; +	vif->dev->stats.tx_bytes = tx_bytes; +	vif->dev->stats.tx_packets = tx_packets; + +	return &vif->dev->stats; +} + +static void xenvif_up(struct xenvif *vif) +{ +	struct xenvif_queue *queue = NULL; +	unsigned int num_queues = vif->num_queues; +	unsigned int queue_index; + +	for (queue_index = 0; queue_index < num_queues; ++queue_index) { +		queue = &vif->queues[queue_index]; +		napi_enable(&queue->napi); +		enable_irq(queue->tx_irq); +		if (queue->tx_irq != queue->rx_irq) +			enable_irq(queue->rx_irq); +		xenvif_napi_schedule_or_enable_events(queue); +	} +} + +static void xenvif_down(struct xenvif *vif) +{ +	struct xenvif_queue *queue = NULL; +	unsigned int num_queues = vif->num_queues; +	unsigned int queue_index; + +	for (queue_index = 0; queue_index < num_queues; ++queue_index) { +		queue = &vif->queues[queue_index]; +		napi_disable(&queue->napi); +		disable_irq(queue->tx_irq); +		if (queue->tx_irq != queue->rx_irq) +			disable_irq(queue->rx_irq); +		del_timer_sync(&queue->credit_timeout); +	} +} + +static int xenvif_open(struct net_device *dev) +{ +	struct xenvif *vif = netdev_priv(dev); +	if (netif_carrier_ok(dev)) +		xenvif_up(vif); +	netif_tx_start_all_queues(dev); +	return 0; +} + +static int xenvif_close(struct net_device *dev) +{ +	struct xenvif *vif = netdev_priv(dev); +	if (netif_carrier_ok(dev)) +		xenvif_down(vif); +	netif_tx_stop_all_queues(dev); +	return 0; +} + +static int xenvif_change_mtu(struct net_device *dev, int mtu) +{ +	struct xenvif *vif = netdev_priv(dev); +	int max = vif->can_sg ? 
65535 - VLAN_ETH_HLEN : ETH_DATA_LEN; + +	if (mtu > max) +		return -EINVAL; +	dev->mtu = mtu; +	return 0; +} + +static netdev_features_t xenvif_fix_features(struct net_device *dev, +	netdev_features_t features) +{ +	struct xenvif *vif = netdev_priv(dev); + +	if (!vif->can_sg) +		features &= ~NETIF_F_SG; +	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4)) +		features &= ~NETIF_F_TSO; +	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6)) +		features &= ~NETIF_F_TSO6; +	if (!vif->ip_csum) +		features &= ~NETIF_F_IP_CSUM; +	if (!vif->ipv6_csum) +		features &= ~NETIF_F_IPV6_CSUM; + +	return features; +} + +static const struct xenvif_stat { +	char name[ETH_GSTRING_LEN]; +	u16 offset; +} xenvif_stats[] = { +	{ +		"rx_gso_checksum_fixup", +		offsetof(struct xenvif_stats, rx_gso_checksum_fixup) +	}, +	/* If (sent != success + fail), there are probably packets never +	 * freed up properly! +	 */ +	{ +		"tx_zerocopy_sent", +		offsetof(struct xenvif_stats, tx_zerocopy_sent), +	}, +	{ +		"tx_zerocopy_success", +		offsetof(struct xenvif_stats, tx_zerocopy_success), +	}, +	{ +		"tx_zerocopy_fail", +		offsetof(struct xenvif_stats, tx_zerocopy_fail) +	}, +	/* Number of packets exceeding MAX_SKB_FRAG slots. You should use +	 * a guest with the same MAX_SKB_FRAG +	 */ +	{ +		"tx_frag_overflow", +		offsetof(struct xenvif_stats, tx_frag_overflow) +	}, +}; + +static int xenvif_get_sset_count(struct net_device *dev, int string_set) +{ +	switch (string_set) { +	case ETH_SS_STATS: +		return ARRAY_SIZE(xenvif_stats); +	default: +		return -EINVAL; +	} +} + +static void xenvif_get_ethtool_stats(struct net_device *dev, +				     struct ethtool_stats *stats, u64 * data) +{ +	struct xenvif *vif = netdev_priv(dev); +	unsigned int num_queues = vif->num_queues; +	int i; +	unsigned int queue_index; +	struct xenvif_stats *vif_stats; + +	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { +		unsigned long accum = 0; +		for (queue_index = 0; queue_index < num_queues; ++queue_index) { +			vif_stats = &vif->queues[queue_index].stats; +			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); +		} +		data[i] = accum; +	} +} + +static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ +	int i; + +	switch (stringset) { +	case ETH_SS_STATS: +		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) +			memcpy(data + i * ETH_GSTRING_LEN, +			       xenvif_stats[i].name, ETH_GSTRING_LEN); +		break; +	} +} + +static const struct ethtool_ops xenvif_ethtool_ops = { +	.get_link	= ethtool_op_get_link, + +	.get_sset_count = xenvif_get_sset_count, +	.get_ethtool_stats = xenvif_get_ethtool_stats, +	.get_strings = xenvif_get_strings, +}; + +static const struct net_device_ops xenvif_netdev_ops = { +	.ndo_start_xmit	= xenvif_start_xmit, +	.ndo_get_stats	= xenvif_get_stats, +	.ndo_open	= xenvif_open, +	.ndo_stop	= xenvif_close, +	.ndo_change_mtu	= xenvif_change_mtu, +	.ndo_fix_features = xenvif_fix_features, +	.ndo_set_mac_address = eth_mac_addr, +	.ndo_validate_addr   = eth_validate_addr, +}; + +struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, +			    unsigned int handle) +{ +	int err; +	struct net_device *dev; +	struct xenvif *vif; +	char name[IFNAMSIZ] = {}; + +	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); +	/* Allocate a netdev with the max. supported number of queues. +	 * When the guest selects the desired number, it will be updated +	 * via netif_set_real_num_*_queues(). 
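+	 * Until then vif->num_queues stays zero and xenvif_start_xmit()
+	 * simply drops packets.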
+	 */ +	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup, +			      xenvif_max_queues); +	if (dev == NULL) { +		pr_warn("Could not allocate netdev for %s\n", name); +		return ERR_PTR(-ENOMEM); +	} + +	SET_NETDEV_DEV(dev, parent); + +	vif = netdev_priv(dev); + +	vif->domid  = domid; +	vif->handle = handle; +	vif->can_sg = 1; +	vif->ip_csum = 1; +	vif->dev = dev; +	vif->disabled = false; + +	/* Start out with no queues. */ +	vif->queues = NULL; +	vif->num_queues = 0; + +	dev->netdev_ops	= &xenvif_netdev_ops; +	dev->hw_features = NETIF_F_SG | +		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | +		NETIF_F_TSO | NETIF_F_TSO6; +	dev->features = dev->hw_features | NETIF_F_RXCSUM; +	dev->ethtool_ops = &xenvif_ethtool_ops; + +	dev->tx_queue_len = XENVIF_QUEUE_LENGTH; + +	/* +	 * Initialise a dummy MAC address. We choose the numerically +	 * largest non-broadcast address to prevent the address getting +	 * stolen by an Ethernet bridge for STP purposes. +	 * (FE:FF:FF:FF:FF:FF) +	 */ +	memset(dev->dev_addr, 0xFF, ETH_ALEN); +	dev->dev_addr[0] &= ~0x01; + +	netif_carrier_off(dev); + +	err = register_netdev(dev); +	if (err) { +		netdev_warn(dev, "Could not register device: err=%d\n", err); +		free_netdev(dev); +		return ERR_PTR(err); +	} + +	netdev_dbg(dev, "Successfully created xenvif\n"); + +	__module_get(THIS_MODULE); + +	return vif; +} + +int xenvif_init_queue(struct xenvif_queue *queue) +{ +	int err, i; + +	queue->credit_bytes = queue->remaining_credit = ~0UL; +	queue->credit_usec  = 0UL; +	init_timer(&queue->credit_timeout); +	queue->credit_window_start = get_jiffies_64(); + +	skb_queue_head_init(&queue->rx_queue); +	skb_queue_head_init(&queue->tx_queue); + +	queue->pending_cons = 0; +	queue->pending_prod = MAX_PENDING_REQS; +	for (i = 0; i < MAX_PENDING_REQS; ++i) +		queue->pending_ring[i] = i; + +	spin_lock_init(&queue->callback_lock); +	spin_lock_init(&queue->response_lock); + +	/* If ballooning is disabled, this will consume real memory, so you +	 * better enable it. 
The long term solution would be to use just a +	 * bunch of valid page descriptors, without dependency on ballooning +	 */ +	err = alloc_xenballooned_pages(MAX_PENDING_REQS, +				       queue->mmap_pages, +				       false); +	if (err) { +		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); +		return -ENOMEM; +	} + +	for (i = 0; i < MAX_PENDING_REQS; i++) { +		queue->pending_tx_info[i].callback_struct = (struct ubuf_info) +			{ .callback = xenvif_zerocopy_callback, +			  .ctx = NULL, +			  .desc = i }; +		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; +	} + +	init_timer(&queue->wake_queue); + +	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, +			XENVIF_NAPI_WEIGHT); + +	return 0; +} + +void xenvif_carrier_on(struct xenvif *vif) +{ +	rtnl_lock(); +	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) +		dev_set_mtu(vif->dev, ETH_DATA_LEN); +	netdev_update_features(vif->dev); +	netif_carrier_on(vif->dev); +	if (netif_running(vif->dev)) +		xenvif_up(vif); +	rtnl_unlock(); +} + +int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, +		   unsigned long rx_ring_ref, unsigned int tx_evtchn, +		   unsigned int rx_evtchn) +{ +	struct task_struct *task; +	int err = -ENOMEM; + +	BUG_ON(queue->tx_irq); +	BUG_ON(queue->task); +	BUG_ON(queue->dealloc_task); + +	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref); +	if (err < 0) +		goto err; + +	init_waitqueue_head(&queue->wq); +	init_waitqueue_head(&queue->dealloc_wq); + +	if (tx_evtchn == rx_evtchn) { +		/* feature-split-event-channels == 0 */ +		err = bind_interdomain_evtchn_to_irqhandler( +			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0, +			queue->name, queue); +		if (err < 0) +			goto err_unmap; +		queue->tx_irq = queue->rx_irq = err; +		disable_irq(queue->tx_irq); +	} else { +		/* feature-split-event-channels == 1 */ +		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), +			 "%s-tx", queue->name); +		err = bind_interdomain_evtchn_to_irqhandler( +			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, +			queue->tx_irq_name, queue); +		if (err < 0) +			goto err_unmap; +		queue->tx_irq = err; +		disable_irq(queue->tx_irq); + +		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), +			 "%s-rx", queue->name); +		err = bind_interdomain_evtchn_to_irqhandler( +			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, +			queue->rx_irq_name, queue); +		if (err < 0) +			goto err_tx_unbind; +		queue->rx_irq = err; +		disable_irq(queue->rx_irq); +	} + +	task = kthread_create(xenvif_kthread_guest_rx, +			      (void *)queue, "%s-guest-rx", queue->name); +	if (IS_ERR(task)) { +		pr_warn("Could not allocate kthread for %s\n", queue->name); +		err = PTR_ERR(task); +		goto err_rx_unbind; +	} +	queue->task = task; + +	task = kthread_create(xenvif_dealloc_kthread, +			      (void *)queue, "%s-dealloc", queue->name); +	if (IS_ERR(task)) { +		pr_warn("Could not allocate kthread for %s\n", queue->name); +		err = PTR_ERR(task); +		goto err_rx_unbind; +	} +	queue->dealloc_task = task; + +	wake_up_process(queue->task); +	wake_up_process(queue->dealloc_task); + +	return 0; + +err_rx_unbind: +	unbind_from_irqhandler(queue->rx_irq, queue); +	queue->rx_irq = 0; +err_tx_unbind: +	unbind_from_irqhandler(queue->tx_irq, queue); +	queue->tx_irq = 0; +err_unmap: +	xenvif_unmap_frontend_rings(queue); +err: +	module_put(THIS_MODULE); +	return err; +} + +void xenvif_carrier_off(struct xenvif *vif) +{ +	struct net_device *dev = vif->dev; + +	rtnl_lock(); +	netif_carrier_off(dev); /* discard queued packets */ +	if 
(netif_running(dev)) +		xenvif_down(vif); +	rtnl_unlock(); +} + +static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue, +				      unsigned int worst_case_skb_lifetime) +{ +	int i, unmap_timeout = 0; + +	for (i = 0; i < MAX_PENDING_REQS; ++i) { +		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) { +			unmap_timeout++; +			schedule_timeout(msecs_to_jiffies(1000)); +			if (unmap_timeout > worst_case_skb_lifetime && +			    net_ratelimit()) +				netdev_err(queue->vif->dev, +					   "Page still granted! Index: %x\n", +					   i); +			i = -1; +		} +	} +} + +void xenvif_disconnect(struct xenvif *vif) +{ +	struct xenvif_queue *queue = NULL; +	unsigned int num_queues = vif->num_queues; +	unsigned int queue_index; + +	if (netif_carrier_ok(vif->dev)) +		xenvif_carrier_off(vif); + +	for (queue_index = 0; queue_index < num_queues; ++queue_index) { +		queue = &vif->queues[queue_index]; + +		if (queue->task) { +			del_timer_sync(&queue->wake_queue); +			kthread_stop(queue->task); +			queue->task = NULL; +		} + +		if (queue->dealloc_task) { +			kthread_stop(queue->dealloc_task); +			queue->dealloc_task = NULL; +		} + +		if (queue->tx_irq) { +			if (queue->tx_irq == queue->rx_irq) +				unbind_from_irqhandler(queue->tx_irq, queue); +			else { +				unbind_from_irqhandler(queue->tx_irq, queue); +				unbind_from_irqhandler(queue->rx_irq, queue); +			} +			queue->tx_irq = 0; +		} + +		xenvif_unmap_frontend_rings(queue); +	} +} + +/* Reverse the relevant parts of xenvif_init_queue(). + * Used for queue teardown from xenvif_free(), and on the + * error handling paths in xenbus.c:connect(). + */ +void xenvif_deinit_queue(struct xenvif_queue *queue) +{ +	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages); +	netif_napi_del(&queue->napi); +} + +void xenvif_free(struct xenvif *vif) +{ +	struct xenvif_queue *queue = NULL; +	unsigned int num_queues = vif->num_queues; +	unsigned int queue_index; +	/* Here we want to avoid timeout messages if an skb can be legitimately +	 * stuck somewhere else. Realistically this could be an another vif's +	 * internal or QDisc queue. That another vif also has this +	 * rx_drain_timeout_msecs timeout, but the timer only ditches the +	 * internal queue. After that, the QDisc queue can put in worst case +	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's +	 * internal queue, so we need several rounds of such timeouts until we +	 * can be sure that no another vif should have skb's from us. We are +	 * not sending more skb's, so newly stuck packets are not interesting +	 * for us here. +	 */ +	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) * +		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS)); + +	unregister_netdev(vif->dev); + +	for (queue_index = 0; queue_index < num_queues; ++queue_index) { +		queue = &vif->queues[queue_index]; +		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime); +		xenvif_deinit_queue(queue); +	} + +	vfree(vif->queues); +	vif->queues = NULL; +	vif->num_queues = 0; + +	free_netdev(vif->dev); + +	module_put(THIS_MODULE); +} diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c new file mode 100644 index 00000000000..c65b636bcab --- /dev/null +++ b/drivers/net/xen-netback/netback.c @@ -0,0 +1,2045 @@ +/* + * Back-end of the driver for virtual network devices. This portion of the + * driver exports a 'unified' network-device interface that can be accessed + * by any operating system that implements a compatible front end. 
A + * reference front-end implementation can be found in: + *  drivers/net/xen-netfront.c + * + * Copyright (c) 2002-2005, K A Fraser + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "common.h" + +#include <linux/kthread.h> +#include <linux/if_vlan.h> +#include <linux/udp.h> +#include <linux/highmem.h> + +#include <net/tcp.h> + +#include <xen/xen.h> +#include <xen/events.h> +#include <xen/interface/memory.h> + +#include <asm/xen/hypercall.h> +#include <asm/xen/page.h> + +/* Provide an option to disable split event channels at load time as + * event channels are limited resource. Split event channels are + * enabled by default. + */ +bool separate_tx_rx_irq = 1; +module_param(separate_tx_rx_irq, bool, 0644); + +/* When guest ring is filled up, qdisc queues the packets for us, but we have + * to timeout them, otherwise other guests' packets can get stuck there + */ +unsigned int rx_drain_timeout_msecs = 10000; +module_param(rx_drain_timeout_msecs, uint, 0444); +unsigned int rx_drain_timeout_jiffies; + +unsigned int xenvif_max_queues; +module_param_named(max_queues, xenvif_max_queues, uint, 0644); +MODULE_PARM_DESC(max_queues, +		 "Maximum number of queues per virtual interface"); + +/* + * This is the maximum slots a skb can have. If a guest sends a skb + * which exceeds this limit it is considered malicious. 
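+ * Packets using more than XEN_NETBK_LEGACY_SLOTS_MAX (18) slots but fewer
+ * than this limit are merely dropped; once a packet reaches this limit the
+ * whole vif is disabled (see xenvif_count_requests()).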
+ */ +#define FATAL_SKB_SLOTS_DEFAULT 20 +static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; +module_param(fatal_skb_slots, uint, 0444); + +static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, +			       u8 status); + +static void make_tx_response(struct xenvif_queue *queue, +			     struct xen_netif_tx_request *txp, +			     s8       st); + +static inline int tx_work_todo(struct xenvif_queue *queue); +static inline int rx_work_todo(struct xenvif_queue *queue); + +static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, +					     u16      id, +					     s8       st, +					     u16      offset, +					     u16      size, +					     u16      flags); + +static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, +				       u16 idx) +{ +	return page_to_pfn(queue->mmap_pages[idx]); +} + +static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, +					 u16 idx) +{ +	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); +} + +#define callback_param(vif, pending_idx) \ +	(vif->pending_tx_info[pending_idx].callback_struct) + +/* Find the containing VIF's structure from a pointer in pending_tx_info array + */ +static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf) +{ +	u16 pending_idx = ubuf->desc; +	struct pending_tx_info *temp = +		container_of(ubuf, struct pending_tx_info, callback_struct); +	return container_of(temp - pending_idx, +			    struct xenvif_queue, +			    pending_tx_info[0]); +} + +/* This is a miniumum size for the linear area to avoid lots of + * calls to __pskb_pull_tail() as we set up checksum offsets. The + * value 128 was chosen as it covers all IPv4 and most likely + * IPv6 headers. + */ +#define PKT_PROT_LEN 128 + +static u16 frag_get_pending_idx(skb_frag_t *frag) +{ +	return (u16)frag->page_offset; +} + +static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx) +{ +	frag->page_offset = pending_idx; +} + +static inline pending_ring_idx_t pending_index(unsigned i) +{ +	return i & (MAX_PENDING_REQS-1); +} + +bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed) +{ +	RING_IDX prod, cons; + +	do { +		prod = queue->rx.sring->req_prod; +		cons = queue->rx.req_cons; + +		if (prod - cons >= needed) +			return true; + +		queue->rx.sring->req_event = prod + 1; + +		/* Make sure event is visible before we check prod +		 * again. +		 */ +		mb(); +	} while (queue->rx.sring->req_prod != prod); + +	return false; +} + +/* + * Returns true if we should start a new receive buffer instead of + * adding 'size' bytes to a buffer which currently contains 'offset' + * bytes. + */ +static bool start_new_rx_buffer(int offset, unsigned long size, int head, +				bool full_coalesce) +{ +	/* simple case: we have completely filled the current buffer. */ +	if (offset == MAX_BUFFER_OFFSET) +		return true; + +	/* +	 * complex case: start a fresh buffer if the current frag +	 * would overflow the current buffer but only if: +	 *     (i)   this frag would fit completely in the next buffer +	 * and (ii)  there is already some data in the current buffer +	 * and (iii) this is not the head buffer. +	 * and (iv)  there is no need to fully utilize the buffers +	 * +	 * Where: +	 * - (i) stops us splitting a frag into two copies +	 *   unless the frag is too large for a single buffer. +	 * - (ii) stops us from leaving a buffer pointlessly empty. +	 * - (iii) stops us leaving the first buffer +	 *   empty. 
Strictly speaking this is already covered +	 *   by (ii) but is explicitly checked because +	 *   netfront relies on the first buffer being +	 *   non-empty and can crash otherwise. +	 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS +	 *   slot +	 * +	 * This means we will effectively linearise small +	 * frags but do not needlessly split large buffers +	 * into multiple copies tend to give large frags their +	 * own buffers as before. +	 */ +	BUG_ON(size > MAX_BUFFER_OFFSET); +	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head && +	    !full_coalesce) +		return true; + +	return false; +} + +struct netrx_pending_operations { +	unsigned copy_prod, copy_cons; +	unsigned meta_prod, meta_cons; +	struct gnttab_copy *copy; +	struct xenvif_rx_meta *meta; +	int copy_off; +	grant_ref_t copy_gref; +}; + +static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, +						 struct netrx_pending_operations *npo) +{ +	struct xenvif_rx_meta *meta; +	struct xen_netif_rx_request *req; + +	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); + +	meta = npo->meta + npo->meta_prod++; +	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; +	meta->gso_size = 0; +	meta->size = 0; +	meta->id = req->id; + +	npo->copy_off = 0; +	npo->copy_gref = req->gref; + +	return meta; +} + +struct xenvif_rx_cb { +	int meta_slots_used; +	bool full_coalesce; +}; + +#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) + +/* + * Set up the grant operations for this fragment. If it's a flipping + * interface, we also set up the unmap request from here. + */ +static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, +				 struct netrx_pending_operations *npo, +				 struct page *page, unsigned long size, +				 unsigned long offset, int *head, +				 struct xenvif_queue *foreign_queue, +				 grant_ref_t foreign_gref) +{ +	struct gnttab_copy *copy_gop; +	struct xenvif_rx_meta *meta; +	unsigned long bytes; +	int gso_type = XEN_NETIF_GSO_TYPE_NONE; + +	/* Data must not cross a page boundary. */ +	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); + +	meta = npo->meta + npo->meta_prod - 1; + +	/* Skip unused frames from start of page */ +	page += offset >> PAGE_SHIFT; +	offset &= ~PAGE_MASK; + +	while (size > 0) { +		BUG_ON(offset >= PAGE_SIZE); +		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); + +		bytes = PAGE_SIZE - offset; + +		if (bytes > size) +			bytes = size; + +		if (start_new_rx_buffer(npo->copy_off, +					bytes, +					*head, +					XENVIF_RX_CB(skb)->full_coalesce)) { +			/* +			 * Netfront requires there to be some data in the head +			 * buffer. 
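+			 * While *head is still set, the buffer prepared for
+			 * the packet head must keep being filled, so asking
+			 * for a fresh one here would be a bug; the BUG_ON()
+			 * below catches that case.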
+			 */ +			BUG_ON(*head); + +			meta = get_next_rx_buffer(queue, npo); +		} + +		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) +			bytes = MAX_BUFFER_OFFSET - npo->copy_off; + +		copy_gop = npo->copy + npo->copy_prod++; +		copy_gop->flags = GNTCOPY_dest_gref; +		copy_gop->len = bytes; + +		if (foreign_queue) { +			copy_gop->source.domid = foreign_queue->vif->domid; +			copy_gop->source.u.ref = foreign_gref; +			copy_gop->flags |= GNTCOPY_source_gref; +		} else { +			copy_gop->source.domid = DOMID_SELF; +			copy_gop->source.u.gmfn = +				virt_to_mfn(page_address(page)); +		} +		copy_gop->source.offset = offset; + +		copy_gop->dest.domid = queue->vif->domid; +		copy_gop->dest.offset = npo->copy_off; +		copy_gop->dest.u.ref = npo->copy_gref; + +		npo->copy_off += bytes; +		meta->size += bytes; + +		offset += bytes; +		size -= bytes; + +		/* Next frame */ +		if (offset == PAGE_SIZE && size) { +			BUG_ON(!PageCompound(page)); +			page++; +			offset = 0; +		} + +		/* Leave a gap for the GSO descriptor. */ +		if (skb_is_gso(skb)) { +			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) +				gso_type = XEN_NETIF_GSO_TYPE_TCPV4; +			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) +				gso_type = XEN_NETIF_GSO_TYPE_TCPV6; +		} + +		if (*head && ((1 << gso_type) & queue->vif->gso_mask)) +			queue->rx.req_cons++; + +		*head = 0; /* There must be something in this buffer now. */ + +	} +} + +/* + * Find the grant ref for a given frag in a chain of struct ubuf_info's + * skb: the skb itself + * i: the frag's number + * ubuf: a pointer to an element in the chain. It should not be NULL + * + * Returns a pointer to the element in the chain where the page were found. If + * not found, returns NULL. + * See the definition of callback_struct in common.h for more details about + * the chain. + */ +static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb, +						const int i, +						const struct ubuf_info *ubuf) +{ +	struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf); + +	do { +		u16 pending_idx = ubuf->desc; + +		if (skb_shinfo(skb)->frags[i].page.p == +		    foreign_queue->mmap_pages[pending_idx]) +			break; +		ubuf = (struct ubuf_info *) ubuf->ctx; +	} while (ubuf); + +	return ubuf; +} + +/* + * Prepare an SKB to be transmitted to the frontend. + * + * This function is responsible for allocating grant operations, meta + * structures, etc. + * + * It returns the number of meta structures consumed. The number of + * ring slots used is always equal to the number of meta slots used + * plus the number of GSO descriptors used. Currently, we use either + * zero GSO descriptors (for non-GSO packets) or one descriptor (for + * frontend-side LRO). 
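+ * (A GSO prefix descriptor, when the frontend has negotiated that variant,
+ * consumes a ring request and a meta slot of its own and is therefore
+ * already included in the count returned here.)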
+ */ +static int xenvif_gop_skb(struct sk_buff *skb, +			  struct netrx_pending_operations *npo, +			  struct xenvif_queue *queue) +{ +	struct xenvif *vif = netdev_priv(skb->dev); +	int nr_frags = skb_shinfo(skb)->nr_frags; +	int i; +	struct xen_netif_rx_request *req; +	struct xenvif_rx_meta *meta; +	unsigned char *data; +	int head = 1; +	int old_meta_prod; +	int gso_type; +	const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; +	const struct ubuf_info *const head_ubuf = ubuf; + +	old_meta_prod = npo->meta_prod; + +	gso_type = XEN_NETIF_GSO_TYPE_NONE; +	if (skb_is_gso(skb)) { +		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) +			gso_type = XEN_NETIF_GSO_TYPE_TCPV4; +		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) +			gso_type = XEN_NETIF_GSO_TYPE_TCPV6; +	} + +	/* Set up a GSO prefix descriptor, if necessary */ +	if ((1 << gso_type) & vif->gso_prefix_mask) { +		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); +		meta = npo->meta + npo->meta_prod++; +		meta->gso_type = gso_type; +		meta->gso_size = skb_shinfo(skb)->gso_size; +		meta->size = 0; +		meta->id = req->id; +	} + +	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); +	meta = npo->meta + npo->meta_prod++; + +	if ((1 << gso_type) & vif->gso_mask) { +		meta->gso_type = gso_type; +		meta->gso_size = skb_shinfo(skb)->gso_size; +	} else { +		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; +		meta->gso_size = 0; +	} + +	meta->size = 0; +	meta->id = req->id; +	npo->copy_off = 0; +	npo->copy_gref = req->gref; + +	data = skb->data; +	while (data < skb_tail_pointer(skb)) { +		unsigned int offset = offset_in_page(data); +		unsigned int len = PAGE_SIZE - offset; + +		if (data + len > skb_tail_pointer(skb)) +			len = skb_tail_pointer(skb) - data; + +		xenvif_gop_frag_copy(queue, skb, npo, +				     virt_to_page(data), len, offset, &head, +				     NULL, +				     0); +		data += len; +	} + +	for (i = 0; i < nr_frags; i++) { +		/* This variable also signals whether foreign_gref has a real +		 * value or not. +		 */ +		struct xenvif_queue *foreign_queue = NULL; +		grant_ref_t foreign_gref; + +		if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && +			(ubuf->callback == &xenvif_zerocopy_callback)) { +			const struct ubuf_info *const startpoint = ubuf; + +			/* Ideally ubuf points to the chain element which +			 * belongs to this frag. Or if frags were removed from +			 * the beginning, then shortly before it. +			 */ +			ubuf = xenvif_find_gref(skb, i, ubuf); + +			/* Try again from the beginning of the list, if we +			 * haven't tried from there. This only makes sense in +			 * the unlikely event of reordering the original frags. +			 * For injected local pages it's an unnecessary second +			 * run. +			 */ +			if (unlikely(!ubuf) && startpoint != head_ubuf) +				ubuf = xenvif_find_gref(skb, i, head_ubuf); + +			if (likely(ubuf)) { +				u16 pending_idx = ubuf->desc; + +				foreign_queue = ubuf_to_queue(ubuf); +				foreign_gref = +					foreign_queue->pending_tx_info[pending_idx].req.gref; +				/* Just a safety measure. If this was the last +				 * element on the list, the for loop will +				 * iterate again if a local page were added to +				 * the end. Using head_ubuf here prevents the +				 * second search on the chain. Or the original +				 * frags changed order, but that's less likely. +				 * In any way, ubuf shouldn't be NULL. +				 */ +				ubuf = ubuf->ctx ? +					(struct ubuf_info *) ubuf->ctx : +					head_ubuf; +			} else +				/* This frag was a local page, added to the +				 * array after the skb left netback. 
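+				 * There is no foreign grant to copy from, so
+				 * xenvif_gop_frag_copy() falls back to a
+				 * plain DOMID_SELF copy; restart the search
+				 * from the head of the chain for the next
+				 * frag.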
+				 */ +				ubuf = head_ubuf; +		} +		xenvif_gop_frag_copy(queue, skb, npo, +				     skb_frag_page(&skb_shinfo(skb)->frags[i]), +				     skb_frag_size(&skb_shinfo(skb)->frags[i]), +				     skb_shinfo(skb)->frags[i].page_offset, +				     &head, +				     foreign_queue, +				     foreign_queue ? foreign_gref : UINT_MAX); +	} + +	return npo->meta_prod - old_meta_prod; +} + +/* + * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was + * used to set up the operations on the top of + * netrx_pending_operations, which have since been done.  Check that + * they didn't give any errors and advance over them. + */ +static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots, +			    struct netrx_pending_operations *npo) +{ +	struct gnttab_copy     *copy_op; +	int status = XEN_NETIF_RSP_OKAY; +	int i; + +	for (i = 0; i < nr_meta_slots; i++) { +		copy_op = npo->copy + npo->copy_cons++; +		if (copy_op->status != GNTST_okay) { +			netdev_dbg(vif->dev, +				   "Bad status %d from copy to DOM%d.\n", +				   copy_op->status, vif->domid); +			status = XEN_NETIF_RSP_ERROR; +		} +	} + +	return status; +} + +static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status, +				      struct xenvif_rx_meta *meta, +				      int nr_meta_slots) +{ +	int i; +	unsigned long offset; + +	/* No fragments used */ +	if (nr_meta_slots <= 1) +		return; + +	nr_meta_slots--; + +	for (i = 0; i < nr_meta_slots; i++) { +		int flags; +		if (i == nr_meta_slots - 1) +			flags = 0; +		else +			flags = XEN_NETRXF_more_data; + +		offset = 0; +		make_rx_response(queue, meta[i].id, status, offset, +				 meta[i].size, flags); +	} +} + +void xenvif_kick_thread(struct xenvif_queue *queue) +{ +	wake_up(&queue->wq); +} + +static void xenvif_rx_action(struct xenvif_queue *queue) +{ +	s8 status; +	u16 flags; +	struct xen_netif_rx_response *resp; +	struct sk_buff_head rxq; +	struct sk_buff *skb; +	LIST_HEAD(notify); +	int ret; +	unsigned long offset; +	bool need_to_notify = false; + +	struct netrx_pending_operations npo = { +		.copy  = queue->grant_copy_op, +		.meta  = queue->meta, +	}; + +	skb_queue_head_init(&rxq); + +	while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) { +		RING_IDX max_slots_needed; +		RING_IDX old_req_cons; +		RING_IDX ring_slots_used; +		int i; + +		/* We need a cheap worse case estimate for the number of +		 * slots we'll use. +		 */ + +		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) + +						skb_headlen(skb), +						PAGE_SIZE); +		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { +			unsigned int size; +			unsigned int offset; + +			size = skb_frag_size(&skb_shinfo(skb)->frags[i]); +			offset = skb_shinfo(skb)->frags[i].page_offset; + +			/* For a worse-case estimate we need to factor in +			 * the fragment page offset as this will affect the +			 * number of times xenvif_gop_frag_copy() will +			 * call start_new_rx_buffer(). +			 */ +			max_slots_needed += DIV_ROUND_UP(offset + size, +							 PAGE_SIZE); +		} + +		/* To avoid the estimate becoming too pessimal for some +		 * frontends that limit posted rx requests, cap the estimate +		 * at MAX_SKB_FRAGS. In this case netback will fully coalesce +		 * the skb into the provided slots. 
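+		 * The full_coalesce flag below makes start_new_rx_buffer()
+		 * fill every buffer right up to MAX_BUFFER_OFFSET, splitting
+		 * frags across slots where necessary.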
+		 */ +		if (max_slots_needed > MAX_SKB_FRAGS) { +			max_slots_needed = MAX_SKB_FRAGS; +			XENVIF_RX_CB(skb)->full_coalesce = true; +		} else { +			XENVIF_RX_CB(skb)->full_coalesce = false; +		} + +		/* We may need one more slot for GSO metadata */ +		if (skb_is_gso(skb) && +		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || +		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) +			max_slots_needed++; + +		/* If the skb may not fit then bail out now */ +		if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) { +			skb_queue_head(&queue->rx_queue, skb); +			need_to_notify = true; +			queue->rx_last_skb_slots = max_slots_needed; +			break; +		} else +			queue->rx_last_skb_slots = 0; + +		old_req_cons = queue->rx.req_cons; +		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); +		ring_slots_used = queue->rx.req_cons - old_req_cons; + +		BUG_ON(ring_slots_used > max_slots_needed); + +		__skb_queue_tail(&rxq, skb); +	} + +	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta)); + +	if (!npo.copy_prod) +		goto done; + +	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); +	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); + +	while ((skb = __skb_dequeue(&rxq)) != NULL) { + +		if ((1 << queue->meta[npo.meta_cons].gso_type) & +		    queue->vif->gso_prefix_mask) { +			resp = RING_GET_RESPONSE(&queue->rx, +						 queue->rx.rsp_prod_pvt++); + +			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; + +			resp->offset = queue->meta[npo.meta_cons].gso_size; +			resp->id = queue->meta[npo.meta_cons].id; +			resp->status = XENVIF_RX_CB(skb)->meta_slots_used; + +			npo.meta_cons++; +			XENVIF_RX_CB(skb)->meta_slots_used--; +		} + + +		queue->stats.tx_bytes += skb->len; +		queue->stats.tx_packets++; + +		status = xenvif_check_gop(queue->vif, +					  XENVIF_RX_CB(skb)->meta_slots_used, +					  &npo); + +		if (XENVIF_RX_CB(skb)->meta_slots_used == 1) +			flags = 0; +		else +			flags = XEN_NETRXF_more_data; + +		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ +			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated; +		else if (skb->ip_summed == CHECKSUM_UNNECESSARY) +			/* remote but checksummed. 
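+			 * Only advertise data_validated; the packet already
+			 * carries a complete checksum, so csum_blank must
+			 * not be set.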
*/ +			flags |= XEN_NETRXF_data_validated; + +		offset = 0; +		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id, +					status, offset, +					queue->meta[npo.meta_cons].size, +					flags); + +		if ((1 << queue->meta[npo.meta_cons].gso_type) & +		    queue->vif->gso_mask) { +			struct xen_netif_extra_info *gso = +				(struct xen_netif_extra_info *) +				RING_GET_RESPONSE(&queue->rx, +						  queue->rx.rsp_prod_pvt++); + +			resp->flags |= XEN_NETRXF_extra_info; + +			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type; +			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size; +			gso->u.gso.pad = 0; +			gso->u.gso.features = 0; + +			gso->type = XEN_NETIF_EXTRA_TYPE_GSO; +			gso->flags = 0; +		} + +		xenvif_add_frag_responses(queue, status, +					  queue->meta + npo.meta_cons + 1, +					  XENVIF_RX_CB(skb)->meta_slots_used); + +		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret); + +		need_to_notify |= !!ret; + +		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used; +		dev_kfree_skb(skb); +	} + +done: +	if (need_to_notify) +		notify_remote_via_irq(queue->rx_irq); +} + +void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) +{ +	int more_to_do; + +	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); + +	if (more_to_do) +		napi_schedule(&queue->napi); +} + +static void tx_add_credit(struct xenvif_queue *queue) +{ +	unsigned long max_burst, max_credit; + +	/* +	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB. +	 * Otherwise the interface can seize up due to insufficient credit. +	 */ +	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; +	max_burst = min(max_burst, 131072UL); +	max_burst = max(max_burst, queue->credit_bytes); + +	/* Take care that adding a new chunk of credit doesn't wrap to zero. 
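+	 * e.g. if remaining_credit is already close to ULONG_MAX the
+	 * addition below would wrap; the check catches this and clamps
+	 * the result to ULONG_MAX.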
*/ +	max_credit = queue->remaining_credit + queue->credit_bytes; +	if (max_credit < queue->remaining_credit) +		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ + +	queue->remaining_credit = min(max_credit, max_burst); +} + +static void tx_credit_callback(unsigned long data) +{ +	struct xenvif_queue *queue = (struct xenvif_queue *)data; +	tx_add_credit(queue); +	xenvif_napi_schedule_or_enable_events(queue); +} + +static void xenvif_tx_err(struct xenvif_queue *queue, +			  struct xen_netif_tx_request *txp, RING_IDX end) +{ +	RING_IDX cons = queue->tx.req_cons; +	unsigned long flags; + +	do { +		spin_lock_irqsave(&queue->response_lock, flags); +		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); +		spin_unlock_irqrestore(&queue->response_lock, flags); +		if (cons == end) +			break; +		txp = RING_GET_REQUEST(&queue->tx, cons++); +	} while (1); +	queue->tx.req_cons = cons; +} + +static void xenvif_fatal_tx_err(struct xenvif *vif) +{ +	netdev_err(vif->dev, "fatal error; disabling device\n"); +	vif->disabled = true; +	/* Disable the vif from queue 0's kthread */ +	if (vif->queues) +		xenvif_kick_thread(&vif->queues[0]); +} + +static int xenvif_count_requests(struct xenvif_queue *queue, +				 struct xen_netif_tx_request *first, +				 struct xen_netif_tx_request *txp, +				 int work_to_do) +{ +	RING_IDX cons = queue->tx.req_cons; +	int slots = 0; +	int drop_err = 0; +	int more_data; + +	if (!(first->flags & XEN_NETTXF_more_data)) +		return 0; + +	do { +		struct xen_netif_tx_request dropped_tx = { 0 }; + +		if (slots >= work_to_do) { +			netdev_err(queue->vif->dev, +				   "Asked for %d slots but exceeds this limit\n", +				   work_to_do); +			xenvif_fatal_tx_err(queue->vif); +			return -ENODATA; +		} + +		/* This guest is really using too many slots and +		 * considered malicious. +		 */ +		if (unlikely(slots >= fatal_skb_slots)) { +			netdev_err(queue->vif->dev, +				   "Malicious frontend using %d slots, threshold %u\n", +				   slots, fatal_skb_slots); +			xenvif_fatal_tx_err(queue->vif); +			return -E2BIG; +		} + +		/* Xen network protocol had implicit dependency on +		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to +		 * the historical MAX_SKB_FRAGS value 18 to honor the +		 * same behavior as before. Any packet using more than +		 * 18 slots but less than fatal_skb_slots slots is +		 * dropped +		 */ +		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { +			if (net_ratelimit()) +				netdev_dbg(queue->vif->dev, +					   "Too many slots (%d) exceeding limit (%d), dropping packet\n", +					   slots, XEN_NETBK_LEGACY_SLOTS_MAX); +			drop_err = -E2BIG; +		} + +		if (drop_err) +			txp = &dropped_tx; + +		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), +		       sizeof(*txp)); + +		/* If the guest submitted a frame >= 64 KiB then +		 * first->size overflowed and following slots will +		 * appear to be larger than the frame. +		 * +		 * This cannot be fatal error as there are buggy +		 * frontends that do this. +		 * +		 * Consume all slots and drop the packet. 
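+		 * Even in the drop case the loop keeps walking so that all
+		 * the slots the frontend produced for this packet are
+		 * accounted for; xenvif_tx_err() then generates an error
+		 * response for each of them.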
+		 */ +		if (!drop_err && txp->size > first->size) { +			if (net_ratelimit()) +				netdev_dbg(queue->vif->dev, +					   "Invalid tx request, slot size %u > remaining size %u\n", +					   txp->size, first->size); +			drop_err = -EIO; +		} + +		first->size -= txp->size; +		slots++; + +		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { +			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", +				 txp->offset, txp->size); +			xenvif_fatal_tx_err(queue->vif); +			return -EINVAL; +		} + +		more_data = txp->flags & XEN_NETTXF_more_data; + +		if (!drop_err) +			txp++; + +	} while (more_data); + +	if (drop_err) { +		xenvif_tx_err(queue, first, cons + slots); +		return drop_err; +	} + +	return slots; +} + + +struct xenvif_tx_cb { +	u16 pending_idx; +}; + +#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) + +static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, +					  u16 pending_idx, +					  struct xen_netif_tx_request *txp, +					  struct gnttab_map_grant_ref *mop) +{ +	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; +	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx), +			  GNTMAP_host_map | GNTMAP_readonly, +			  txp->gref, queue->vif->domid); + +	memcpy(&queue->pending_tx_info[pending_idx].req, txp, +	       sizeof(*txp)); +} + +static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) +{ +	struct sk_buff *skb = +		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN, +			  GFP_ATOMIC | __GFP_NOWARN); +	if (unlikely(skb == NULL)) +		return NULL; + +	/* Packets passed to netif_rx() must have some headroom. */ +	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + +	/* Initialize it here to avoid later surprises */ +	skb_shinfo(skb)->destructor_arg = NULL; + +	return skb; +} + +static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, +							struct sk_buff *skb, +							struct xen_netif_tx_request *txp, +							struct gnttab_map_grant_ref *gop) +{ +	struct skb_shared_info *shinfo = skb_shinfo(skb); +	skb_frag_t *frags = shinfo->frags; +	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; +	int start; +	pending_ring_idx_t index; +	unsigned int nr_slots, frag_overflow = 0; + +	/* At this point shinfo->nr_frags is in fact the number of +	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. +	 */ +	if (shinfo->nr_frags > MAX_SKB_FRAGS) { +		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS; +		BUG_ON(frag_overflow > MAX_SKB_FRAGS); +		shinfo->nr_frags = MAX_SKB_FRAGS; +	} +	nr_slots = shinfo->nr_frags; + +	/* Skip first skb fragment if it is on same page as header fragment. 
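+	 * A grant map for that slot was already queued when the header was set up in xenvif_tx_build_gops().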
*/ +	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); + +	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; +	     shinfo->nr_frags++, txp++, gop++) { +		index = pending_index(queue->pending_cons++); +		pending_idx = queue->pending_ring[index]; +		xenvif_tx_create_map_op(queue, pending_idx, txp, gop); +		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); +	} + +	if (frag_overflow) { +		struct sk_buff *nskb = xenvif_alloc_skb(0); +		if (unlikely(nskb == NULL)) { +			if (net_ratelimit()) +				netdev_err(queue->vif->dev, +					   "Can't allocate the frag_list skb.\n"); +			return NULL; +		} + +		shinfo = skb_shinfo(nskb); +		frags = shinfo->frags; + +		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; +		     shinfo->nr_frags++, txp++, gop++) { +			index = pending_index(queue->pending_cons++); +			pending_idx = queue->pending_ring[index]; +			xenvif_tx_create_map_op(queue, pending_idx, txp, gop); +			frag_set_pending_idx(&frags[shinfo->nr_frags], +					     pending_idx); +		} + +		skb_shinfo(skb)->frag_list = nskb; +	} + +	return gop; +} + +static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, +					   u16 pending_idx, +					   grant_handle_t handle) +{ +	if (unlikely(queue->grant_tx_handle[pending_idx] != +		     NETBACK_INVALID_HANDLE)) { +		netdev_err(queue->vif->dev, +			   "Trying to overwrite active handle! pending_idx: %x\n", +			   pending_idx); +		BUG(); +	} +	queue->grant_tx_handle[pending_idx] = handle; +} + +static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue, +					     u16 pending_idx) +{ +	if (unlikely(queue->grant_tx_handle[pending_idx] == +		     NETBACK_INVALID_HANDLE)) { +		netdev_err(queue->vif->dev, +			   "Trying to unmap invalid handle! pending_idx: %x\n", +			   pending_idx); +		BUG(); +	} +	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; +} + +static int xenvif_tx_check_gop(struct xenvif_queue *queue, +			       struct sk_buff *skb, +			       struct gnttab_map_grant_ref **gopp_map, +			       struct gnttab_copy **gopp_copy) +{ +	struct gnttab_map_grant_ref *gop_map = *gopp_map; +	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; +	/* This always points to the shinfo of the skb being checked, which +	 * could be either the first or the one on the frag_list +	 */ +	struct skb_shared_info *shinfo = skb_shinfo(skb); +	/* If this is non-NULL, we are currently checking the frag_list skb, and +	 * this points to the shinfo of the first one +	 */ +	struct skb_shared_info *first_shinfo = NULL; +	int nr_frags = shinfo->nr_frags; +	const bool sharedslot = nr_frags && +				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; +	int i, err; + +	/* Check status of header. */ +	err = (*gopp_copy)->status; +	if (unlikely(err)) { +		if (net_ratelimit()) +			netdev_dbg(queue->vif->dev, +				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", +				   (*gopp_copy)->status, +				   pending_idx, +				   (*gopp_copy)->source.u.ref); +		/* The first frag might still have this slot mapped */ +		if (!sharedslot) +			xenvif_idx_release(queue, pending_idx, +					   XEN_NETIF_RSP_ERROR); +	} +	(*gopp_copy)++; + +check_frags: +	for (i = 0; i < nr_frags; i++, gop_map++) { +		int j, newerr; + +		pending_idx = frag_get_pending_idx(&shinfo->frags[i]); + +		/* Check error status: if okay then remember grant handle. */ +		newerr = gop_map->status; + +		if (likely(!newerr)) { +			xenvif_grant_handle_set(queue, +						pending_idx, +						gop_map->handle); +			/* Had a previous error? 
Invalidate this fragment. */ +			if (unlikely(err)) { +				xenvif_idx_unmap(queue, pending_idx); +				/* If the mapping of the first frag was OK, but +				 * the header's copy failed, and they are +				 * sharing a slot, send an error +				 */ +				if (i == 0 && sharedslot) +					xenvif_idx_release(queue, pending_idx, +							   XEN_NETIF_RSP_ERROR); +				else +					xenvif_idx_release(queue, pending_idx, +							   XEN_NETIF_RSP_OKAY); +			} +			continue; +		} + +		/* Error on this fragment: respond to client with an error. */ +		if (net_ratelimit()) +			netdev_dbg(queue->vif->dev, +				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n", +				   i, +				   gop_map->status, +				   pending_idx, +				   gop_map->ref); + +		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); + +		/* Not the first error? Preceding frags already invalidated. */ +		if (err) +			continue; + +		/* First error: if the header haven't shared a slot with the +		 * first frag, release it as well. +		 */ +		if (!sharedslot) +			xenvif_idx_release(queue, +					   XENVIF_TX_CB(skb)->pending_idx, +					   XEN_NETIF_RSP_OKAY); + +		/* Invalidate preceding fragments of this skb. */ +		for (j = 0; j < i; j++) { +			pending_idx = frag_get_pending_idx(&shinfo->frags[j]); +			xenvif_idx_unmap(queue, pending_idx); +			xenvif_idx_release(queue, pending_idx, +					   XEN_NETIF_RSP_OKAY); +		} + +		/* And if we found the error while checking the frag_list, unmap +		 * the first skb's frags +		 */ +		if (first_shinfo) { +			for (j = 0; j < first_shinfo->nr_frags; j++) { +				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); +				xenvif_idx_unmap(queue, pending_idx); +				xenvif_idx_release(queue, pending_idx, +						   XEN_NETIF_RSP_OKAY); +			} +		} + +		/* Remember the error: invalidate all subsequent fragments. */ +		err = newerr; +	} + +	if (skb_has_frag_list(skb) && !first_shinfo) { +		first_shinfo = skb_shinfo(skb); +		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); +		nr_frags = shinfo->nr_frags; + +		goto check_frags; +	} + +	*gopp_map = gop_map; +	return err; +} + +static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) +{ +	struct skb_shared_info *shinfo = skb_shinfo(skb); +	int nr_frags = shinfo->nr_frags; +	int i; +	u16 prev_pending_idx = INVALID_PENDING_IDX; + +	for (i = 0; i < nr_frags; i++) { +		skb_frag_t *frag = shinfo->frags + i; +		struct xen_netif_tx_request *txp; +		struct page *page; +		u16 pending_idx; + +		pending_idx = frag_get_pending_idx(frag); + +		/* If this is not the first frag, chain it to the previous*/ +		if (prev_pending_idx == INVALID_PENDING_IDX) +			skb_shinfo(skb)->destructor_arg = +				&callback_param(queue, pending_idx); +		else +			callback_param(queue, prev_pending_idx).ctx = +				&callback_param(queue, pending_idx); + +		callback_param(queue, pending_idx).ctx = NULL; +		prev_pending_idx = pending_idx; + +		txp = &queue->pending_tx_info[pending_idx].req; +		page = virt_to_page(idx_to_kaddr(queue, pending_idx)); +		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size); +		skb->len += txp->size; +		skb->data_len += txp->size; +		skb->truesize += txp->size; + +		/* Take an extra reference to offset network stack's put_page */ +		get_page(queue->mmap_pages[pending_idx]); +	} +	/* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc +	 * overlaps with "index", and "mapping" is not set. I think mapping +	 * should be set. 
If delivered to local stack, it would drop this +	 * skb in sk_filter unless the socket has the right to use it. +	 */ +	skb->pfmemalloc	= false; +} + +static int xenvif_get_extras(struct xenvif_queue *queue, +				struct xen_netif_extra_info *extras, +				int work_to_do) +{ +	struct xen_netif_extra_info extra; +	RING_IDX cons = queue->tx.req_cons; + +	do { +		if (unlikely(work_to_do-- <= 0)) { +			netdev_err(queue->vif->dev, "Missing extra info\n"); +			xenvif_fatal_tx_err(queue->vif); +			return -EBADR; +		} + +		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), +		       sizeof(extra)); +		if (unlikely(!extra.type || +			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { +			queue->tx.req_cons = ++cons; +			netdev_err(queue->vif->dev, +				   "Invalid extra type: %d\n", extra.type); +			xenvif_fatal_tx_err(queue->vif); +			return -EINVAL; +		} + +		memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); +		queue->tx.req_cons = ++cons; +	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); + +	return work_to_do; +} + +static int xenvif_set_skb_gso(struct xenvif *vif, +			      struct sk_buff *skb, +			      struct xen_netif_extra_info *gso) +{ +	if (!gso->u.gso.size) { +		netdev_err(vif->dev, "GSO size must not be zero.\n"); +		xenvif_fatal_tx_err(vif); +		return -EINVAL; +	} + +	switch (gso->u.gso.type) { +	case XEN_NETIF_GSO_TYPE_TCPV4: +		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +		break; +	case XEN_NETIF_GSO_TYPE_TCPV6: +		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; +		break; +	default: +		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); +		xenvif_fatal_tx_err(vif); +		return -EINVAL; +	} + +	skb_shinfo(skb)->gso_size = gso->u.gso.size; +	/* gso_segs will be calculated later */ + +	return 0; +} + +static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb) +{ +	bool recalculate_partial_csum = false; + +	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy +	 * peers can fail to set NETRXF_csum_blank when sending a GSO +	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and +	 * recalculate the partial checksum. +	 */ +	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { +		queue->stats.rx_gso_checksum_fixup++; +		skb->ip_summed = CHECKSUM_PARTIAL; +		recalculate_partial_csum = true; +	} + +	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */ +	if (skb->ip_summed != CHECKSUM_PARTIAL) +		return 0; + +	return skb_checksum_setup(skb, recalculate_partial_csum); +} + +static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) +{ +	u64 now = get_jiffies_64(); +	u64 next_credit = queue->credit_window_start + +		msecs_to_jiffies(queue->credit_usec / 1000); + +	/* Timer could already be pending in rare cases. */ +	if (timer_pending(&queue->credit_timeout)) +		return true; + +	/* Passed the point where we can replenish credit? */ +	if (time_after_eq64(now, next_credit)) { +		queue->credit_window_start = now; +		tx_add_credit(queue); +	} + +	/* Still too big to send right now? Set a callback. 
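+	 * tx_credit_callback() tops the credit back up and re-schedules NAPI when the timer fires.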
*/ +	if (size > queue->remaining_credit) { +		queue->credit_timeout.data     = +			(unsigned long)queue; +		queue->credit_timeout.function = +			tx_credit_callback; +		mod_timer(&queue->credit_timeout, +			  next_credit); +		queue->credit_window_start = next_credit; + +		return true; +	} + +	return false; +} + +static void xenvif_tx_build_gops(struct xenvif_queue *queue, +				     int budget, +				     unsigned *copy_ops, +				     unsigned *map_ops) +{ +	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; +	struct sk_buff *skb; +	int ret; + +	while (skb_queue_len(&queue->tx_queue) < budget) { +		struct xen_netif_tx_request txreq; +		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; +		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; +		u16 pending_idx; +		RING_IDX idx; +		int work_to_do; +		unsigned int data_len; +		pending_ring_idx_t index; + +		if (queue->tx.sring->req_prod - queue->tx.req_cons > +		    XEN_NETIF_TX_RING_SIZE) { +			netdev_err(queue->vif->dev, +				   "Impossible number of requests. " +				   "req_prod %d, req_cons %d, size %ld\n", +				   queue->tx.sring->req_prod, queue->tx.req_cons, +				   XEN_NETIF_TX_RING_SIZE); +			xenvif_fatal_tx_err(queue->vif); +			break; +		} + +		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); +		if (!work_to_do) +			break; + +		idx = queue->tx.req_cons; +		rmb(); /* Ensure that we see the request before we copy it. */ +		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); + +		/* Credit-based scheduling. */ +		if (txreq.size > queue->remaining_credit && +		    tx_credit_exceeded(queue, txreq.size)) +			break; + +		queue->remaining_credit -= txreq.size; + +		work_to_do--; +		queue->tx.req_cons = ++idx; + +		memset(extras, 0, sizeof(extras)); +		if (txreq.flags & XEN_NETTXF_extra_info) { +			work_to_do = xenvif_get_extras(queue, extras, +						       work_to_do); +			idx = queue->tx.req_cons; +			if (unlikely(work_to_do < 0)) +				break; +		} + +		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do); +		if (unlikely(ret < 0)) +			break; + +		idx += ret; + +		if (unlikely(txreq.size < ETH_HLEN)) { +			netdev_dbg(queue->vif->dev, +				   "Bad packet size: %d\n", txreq.size); +			xenvif_tx_err(queue, &txreq, idx); +			break; +		} + +		/* No crossing a page as the payload mustn't fragment. */ +		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { +			netdev_err(queue->vif->dev, +				   "txreq.offset: %x, size: %u, end: %lu\n", +				   txreq.offset, txreq.size, +				   (txreq.offset&~PAGE_MASK) + txreq.size); +			xenvif_fatal_tx_err(queue->vif); +			break; +		} + +		index = pending_index(queue->pending_cons); +		pending_idx = queue->pending_ring[index]; + +		data_len = (txreq.size > PKT_PROT_LEN && +			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? +			PKT_PROT_LEN : txreq.size; + +		skb = xenvif_alloc_skb(data_len); +		if (unlikely(skb == NULL)) { +			netdev_dbg(queue->vif->dev, +				   "Can't allocate a skb in start_xmit.\n"); +			xenvif_tx_err(queue, &txreq, idx); +			break; +		} + +		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { +			struct xen_netif_extra_info *gso; +			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; + +			if (xenvif_set_skb_gso(queue->vif, skb, gso)) { +				/* Failure in xenvif_set_skb_gso is fatal. 
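+				 * It has already called xenvif_fatal_tx_err(), so just free the skb and stop building gops.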
*/ +				kfree_skb(skb); +				break; +			} +		} + +		XENVIF_TX_CB(skb)->pending_idx = pending_idx; + +		__skb_put(skb, data_len); +		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; +		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; +		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; + +		queue->tx_copy_ops[*copy_ops].dest.u.gmfn = +			virt_to_mfn(skb->data); +		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; +		queue->tx_copy_ops[*copy_ops].dest.offset = +			offset_in_page(skb->data); + +		queue->tx_copy_ops[*copy_ops].len = data_len; +		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; + +		(*copy_ops)++; + +		skb_shinfo(skb)->nr_frags = ret; +		if (data_len < txreq.size) { +			skb_shinfo(skb)->nr_frags++; +			frag_set_pending_idx(&skb_shinfo(skb)->frags[0], +					     pending_idx); +			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); +			gop++; +		} else { +			frag_set_pending_idx(&skb_shinfo(skb)->frags[0], +					     INVALID_PENDING_IDX); +			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq, +			       sizeof(txreq)); +		} + +		queue->pending_cons++; + +		request_gop = xenvif_get_requests(queue, skb, txfrags, gop); +		if (request_gop == NULL) { +			kfree_skb(skb); +			xenvif_tx_err(queue, &txreq, idx); +			break; +		} +		gop = request_gop; + +		__skb_queue_tail(&queue->tx_queue, skb); + +		queue->tx.req_cons = idx; + +		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || +		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) +			break; +	} + +	(*map_ops) = gop - queue->tx_map_ops; +	return; +} + +/* Consolidate skb with a frag_list into a brand new one with local pages on + * frags. Returns 0 or -ENOMEM if can't allocate new pages. + */ +static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb) +{ +	unsigned int offset = skb_headlen(skb); +	skb_frag_t frags[MAX_SKB_FRAGS]; +	int i; +	struct ubuf_info *uarg; +	struct sk_buff *nskb = skb_shinfo(skb)->frag_list; + +	queue->stats.tx_zerocopy_sent += 2; +	queue->stats.tx_frag_overflow++; + +	xenvif_fill_frags(queue, nskb); +	/* Subtract frags size, we will correct it later */ +	skb->truesize -= skb->data_len; +	skb->len += nskb->len; +	skb->data_len += nskb->len; + +	/* create a brand new frags array and coalesce there */ +	for (i = 0; offset < skb->len; i++) { +		struct page *page; +		unsigned int len; + +		BUG_ON(i >= MAX_SKB_FRAGS); +		page = alloc_page(GFP_ATOMIC|__GFP_COLD); +		if (!page) { +			int j; +			skb->truesize += skb->data_len; +			for (j = 0; j < i; j++) +				put_page(frags[j].page.p); +			return -ENOMEM; +		} + +		if (offset + PAGE_SIZE < skb->len) +			len = PAGE_SIZE; +		else +			len = skb->len - offset; +		if (skb_copy_bits(skb, offset, page_address(page), len)) +			BUG(); + +		offset += len; +		frags[i].page.p = page; +		frags[i].page_offset = 0; +		skb_frag_size_set(&frags[i], len); +	} +	/* swap out with old one */ +	memcpy(skb_shinfo(skb)->frags, +	       frags, +	       i * sizeof(skb_frag_t)); +	skb_shinfo(skb)->nr_frags = i; +	skb->truesize += i * PAGE_SIZE; + +	/* remove traces of mapped pages and frag_list */ +	skb_frag_list_init(skb); +	uarg = skb_shinfo(skb)->destructor_arg; +	uarg->callback(uarg, true); +	skb_shinfo(skb)->destructor_arg = NULL; + +	skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY; +	kfree_skb(nskb); + +	return 0; +} + +static int xenvif_tx_submit(struct xenvif_queue *queue) +{ +	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; +	struct gnttab_copy *gop_copy = queue->tx_copy_ops; +	
struct sk_buff *skb; +	int work_done = 0; + +	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { +		struct xen_netif_tx_request *txp; +		u16 pending_idx; +		unsigned data_len; + +		pending_idx = XENVIF_TX_CB(skb)->pending_idx; +		txp = &queue->pending_tx_info[pending_idx].req; + +		/* Check the remap error code. */ +		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { +			/* If there was an error, xenvif_tx_check_gop is +			 * expected to release all the frags which were mapped, +			 * so kfree_skb shouldn't do it again +			 */ +			skb_shinfo(skb)->nr_frags = 0; +			if (skb_has_frag_list(skb)) { +				struct sk_buff *nskb = +						skb_shinfo(skb)->frag_list; +				skb_shinfo(nskb)->nr_frags = 0; +			} +			kfree_skb(skb); +			continue; +		} + +		data_len = skb->len; +		callback_param(queue, pending_idx).ctx = NULL; +		if (data_len < txp->size) { +			/* Append the packet payload as a fragment. */ +			txp->offset += data_len; +			txp->size -= data_len; +		} else { +			/* Schedule a response immediately. */ +			xenvif_idx_release(queue, pending_idx, +					   XEN_NETIF_RSP_OKAY); +		} + +		if (txp->flags & XEN_NETTXF_csum_blank) +			skb->ip_summed = CHECKSUM_PARTIAL; +		else if (txp->flags & XEN_NETTXF_data_validated) +			skb->ip_summed = CHECKSUM_UNNECESSARY; + +		xenvif_fill_frags(queue, skb); + +		if (unlikely(skb_has_frag_list(skb))) { +			if (xenvif_handle_frag_list(queue, skb)) { +				if (net_ratelimit()) +					netdev_err(queue->vif->dev, +						   "Not enough memory to consolidate frag_list!\n"); +				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; +				kfree_skb(skb); +				continue; +			} +		} + +		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) { +			int target = min_t(int, skb->len, PKT_PROT_LEN); +			__pskb_pull_tail(skb, target - skb_headlen(skb)); +		} + +		skb->dev      = queue->vif->dev; +		skb->protocol = eth_type_trans(skb, skb->dev); +		skb_reset_network_header(skb); + +		if (checksum_setup(queue, skb)) { +			netdev_dbg(queue->vif->dev, +				   "Can't setup checksum in net_tx_action\n"); +			/* We have to set this flag to trigger the callback */ +			if (skb_shinfo(skb)->destructor_arg) +				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; +			kfree_skb(skb); +			continue; +		} + +		skb_probe_transport_header(skb, 0); + +		/* If the packet is GSO then we will have just set up the +		 * transport header offset in checksum_setup so it's now +		 * straightforward to calculate gso_segs. +		 */ +		if (skb_is_gso(skb)) { +			int mss = skb_shinfo(skb)->gso_size; +			int hdrlen = skb_transport_header(skb) - +				skb_mac_header(skb) + +				tcp_hdrlen(skb); + +			skb_shinfo(skb)->gso_segs = +				DIV_ROUND_UP(skb->len - hdrlen, mss); +		} + +		queue->stats.rx_bytes += skb->len; +		queue->stats.rx_packets++; + +		work_done++; + +		/* Set this flag right before netif_receive_skb, otherwise +		 * someone might think this packet already left netback, and +		 * do a skb_copy_ubufs while we are still in control of the +		 * skb. E.g. the __pskb_pull_tail earlier can do such thing. 
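+		 * Once set, releasing the skb (or copying its ubufs) invokes xenvif_zerocopy_callback(), which queues the slots for deallocation.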
+		 */ +		if (skb_shinfo(skb)->destructor_arg) { +			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; +			queue->stats.tx_zerocopy_sent++; +		} + +		netif_receive_skb(skb); +	} + +	return work_done; +} + +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success) +{ +	unsigned long flags; +	pending_ring_idx_t index; +	struct xenvif_queue *queue = ubuf_to_queue(ubuf); + +	/* This is the only place where we grab this lock, to protect callbacks +	 * from each other. +	 */ +	spin_lock_irqsave(&queue->callback_lock, flags); +	do { +		u16 pending_idx = ubuf->desc; +		ubuf = (struct ubuf_info *) ubuf->ctx; +		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= +			MAX_PENDING_REQS); +		index = pending_index(queue->dealloc_prod); +		queue->dealloc_ring[index] = pending_idx; +		/* Sync with xenvif_tx_dealloc_action: +		 * insert idx then incr producer. +		 */ +		smp_wmb(); +		queue->dealloc_prod++; +	} while (ubuf); +	wake_up(&queue->dealloc_wq); +	spin_unlock_irqrestore(&queue->callback_lock, flags); + +	if (likely(zerocopy_success)) +		queue->stats.tx_zerocopy_success++; +	else +		queue->stats.tx_zerocopy_fail++; +} + +static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) +{ +	struct gnttab_unmap_grant_ref *gop; +	pending_ring_idx_t dc, dp; +	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS]; +	unsigned int i = 0; + +	dc = queue->dealloc_cons; +	gop = queue->tx_unmap_ops; + +	/* Free up any grants we have finished using */ +	do { +		dp = queue->dealloc_prod; + +		/* Ensure we see all indices enqueued by all +		 * xenvif_zerocopy_callback(). +		 */ +		smp_rmb(); + +		while (dc != dp) { +			BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); +			pending_idx = +				queue->dealloc_ring[pending_index(dc++)]; + +			pending_idx_release[gop-queue->tx_unmap_ops] = +				pending_idx; +			queue->pages_to_unmap[gop-queue->tx_unmap_ops] = +				queue->mmap_pages[pending_idx]; +			gnttab_set_unmap_op(gop, +					    idx_to_kaddr(queue, pending_idx), +					    GNTMAP_host_map, +					    queue->grant_tx_handle[pending_idx]); +			xenvif_grant_handle_reset(queue, pending_idx); +			++gop; +		} + +	} while (dp != queue->dealloc_prod); + +	queue->dealloc_cons = dc; + +	if (gop - queue->tx_unmap_ops > 0) { +		int ret; +		ret = gnttab_unmap_refs(queue->tx_unmap_ops, +					NULL, +					queue->pages_to_unmap, +					gop - queue->tx_unmap_ops); +		if (ret) { +			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n", +				   gop - queue->tx_unmap_ops, ret); +			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { +				if (gop[i].status != GNTST_okay) +					netdev_err(queue->vif->dev, +						   " host_addr: %llx handle: %x status: %d\n", +						   gop[i].host_addr, +						   gop[i].handle, +						   gop[i].status); +			} +			BUG(); +		} +	} + +	for (i = 0; i < gop - queue->tx_unmap_ops; ++i) +		xenvif_idx_release(queue, pending_idx_release[i], +				   XEN_NETIF_RSP_OKAY); +} + + +/* Called after netfront has transmitted */ +int xenvif_tx_action(struct xenvif_queue *queue, int budget) +{ +	unsigned nr_mops, nr_cops = 0; +	int work_done, ret; + +	if (unlikely(!tx_work_todo(queue))) +		return 0; + +	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops); + +	if (nr_cops == 0) +		return 0; + +	gnttab_batch_copy(queue->tx_copy_ops, nr_cops); +	if (nr_mops != 0) { +		ret = gnttab_map_refs(queue->tx_map_ops, +				      NULL, +				      queue->pages_to_map, +				      nr_mops); +		BUG_ON(ret); +	} + +	work_done = xenvif_tx_submit(queue); + +	return work_done; +} + +static void 
xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, +			       u8 status) +{ +	struct pending_tx_info *pending_tx_info; +	pending_ring_idx_t index; +	unsigned long flags; + +	pending_tx_info = &queue->pending_tx_info[pending_idx]; +	spin_lock_irqsave(&queue->response_lock, flags); +	make_tx_response(queue, &pending_tx_info->req, status); +	index = pending_index(queue->pending_prod); +	queue->pending_ring[index] = pending_idx; +	/* TX shouldn't use the index before we give it back here */ +	mb(); +	queue->pending_prod++; +	spin_unlock_irqrestore(&queue->response_lock, flags); +} + + +static void make_tx_response(struct xenvif_queue *queue, +			     struct xen_netif_tx_request *txp, +			     s8       st) +{ +	RING_IDX i = queue->tx.rsp_prod_pvt; +	struct xen_netif_tx_response *resp; +	int notify; + +	resp = RING_GET_RESPONSE(&queue->tx, i); +	resp->id     = txp->id; +	resp->status = st; + +	if (txp->flags & XEN_NETTXF_extra_info) +		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; + +	queue->tx.rsp_prod_pvt = ++i; +	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); +	if (notify) +		notify_remote_via_irq(queue->tx_irq); +} + +static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, +					     u16      id, +					     s8       st, +					     u16      offset, +					     u16      size, +					     u16      flags) +{ +	RING_IDX i = queue->rx.rsp_prod_pvt; +	struct xen_netif_rx_response *resp; + +	resp = RING_GET_RESPONSE(&queue->rx, i); +	resp->offset     = offset; +	resp->flags      = flags; +	resp->id         = id; +	resp->status     = (s16)size; +	if (st < 0) +		resp->status = (s16)st; + +	queue->rx.rsp_prod_pvt = ++i; + +	return resp; +} + +void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) +{ +	int ret; +	struct gnttab_unmap_grant_ref tx_unmap_op; + +	gnttab_set_unmap_op(&tx_unmap_op, +			    idx_to_kaddr(queue, pending_idx), +			    GNTMAP_host_map, +			    queue->grant_tx_handle[pending_idx]); +	xenvif_grant_handle_reset(queue, pending_idx); + +	ret = gnttab_unmap_refs(&tx_unmap_op, NULL, +				&queue->mmap_pages[pending_idx], 1); +	if (ret) { +		netdev_err(queue->vif->dev, +			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n", +			   ret, +			   pending_idx, +			   tx_unmap_op.host_addr, +			   tx_unmap_op.handle, +			   tx_unmap_op.status); +		BUG(); +	} +} + +static inline int rx_work_todo(struct xenvif_queue *queue) +{ +	return (!skb_queue_empty(&queue->rx_queue) && +	       xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) || +	       queue->rx_queue_purge; +} + +static inline int tx_work_todo(struct xenvif_queue *queue) +{ +	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) +		return 1; + +	return 0; +} + +static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue) +{ +	return queue->dealloc_cons != queue->dealloc_prod; +} + +void xenvif_unmap_frontend_rings(struct xenvif_queue *queue) +{ +	if (queue->tx.sring) +		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), +					queue->tx.sring); +	if (queue->rx.sring) +		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), +					queue->rx.sring); +} + +int xenvif_map_frontend_rings(struct xenvif_queue *queue, +			      grant_ref_t tx_ring_ref, +			      grant_ref_t rx_ring_ref) +{ +	void *addr; +	struct xen_netif_tx_sring *txs; +	struct xen_netif_rx_sring *rxs; + +	int err = -ENOMEM; + +	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), +				     tx_ring_ref, &addr); +	if (err) 
+		goto err; + +	txs = (struct xen_netif_tx_sring *)addr; +	BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE); + +	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), +				     rx_ring_ref, &addr); +	if (err) +		goto err; + +	rxs = (struct xen_netif_rx_sring *)addr; +	BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE); + +	return 0; + +err: +	xenvif_unmap_frontend_rings(queue); +	return err; +} + +static void xenvif_start_queue(struct xenvif_queue *queue) +{ +	if (xenvif_schedulable(queue->vif)) +		xenvif_wake_queue(queue); +} + +int xenvif_kthread_guest_rx(void *data) +{ +	struct xenvif_queue *queue = data; +	struct sk_buff *skb; + +	while (!kthread_should_stop()) { +		wait_event_interruptible(queue->wq, +					 rx_work_todo(queue) || +					 queue->vif->disabled || +					 kthread_should_stop()); + +		/* This frontend is found to be rogue, disable it in +		 * kthread context. Currently this is only set when +		 * netback finds out frontend sends malformed packet, +		 * but we cannot disable the interface in softirq +		 * context so we defer it here, if this thread is +		 * associated with queue 0. +		 */ +		if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0)) +			xenvif_carrier_off(queue->vif); + +		if (kthread_should_stop()) +			break; + +		if (queue->rx_queue_purge) { +			skb_queue_purge(&queue->rx_queue); +			queue->rx_queue_purge = false; +		} + +		if (!skb_queue_empty(&queue->rx_queue)) +			xenvif_rx_action(queue); + +		if (skb_queue_empty(&queue->rx_queue) && +		    xenvif_queue_stopped(queue)) { +			del_timer_sync(&queue->wake_queue); +			xenvif_start_queue(queue); +		} + +		cond_resched(); +	} + +	/* Bin any remaining skbs */ +	while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) +		dev_kfree_skb(skb); + +	return 0; +} + +int xenvif_dealloc_kthread(void *data) +{ +	struct xenvif_queue *queue = data; + +	while (!kthread_should_stop()) { +		wait_event_interruptible(queue->dealloc_wq, +					 tx_dealloc_work_todo(queue) || +					 kthread_should_stop()); +		if (kthread_should_stop()) +			break; + +		xenvif_tx_dealloc_action(queue); +		cond_resched(); +	} + +	/* Unmap anything remaining*/ +	if (tx_dealloc_work_todo(queue)) +		xenvif_tx_dealloc_action(queue); + +	return 0; +} + +static int __init netback_init(void) +{ +	int rc = 0; + +	if (!xen_domain()) +		return -ENODEV; + +	/* Allow as many queues as there are CPUs, by default */ +	xenvif_max_queues = num_online_cpus(); + +	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { +		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", +			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX); +		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX; +	} + +	rc = xenvif_xenbus_init(); +	if (rc) +		goto failed_init; + +	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs); + +	return 0; + +failed_init: +	return rc; +} + +module_init(netback_init); + +static void __exit netback_fini(void) +{ +	xenvif_xenbus_fini(); +} +module_exit(netback_fini); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_ALIAS("xen-backend:vif"); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c new file mode 100644 index 00000000000..3d85acd84ba --- /dev/null +++ b/drivers/net/xen-netback/xenbus.c @@ -0,0 +1,786 @@ +/* + * Xenbus code for netif backend + * + * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au> + * Copyright (C) 2005 XenSource Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public 
License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. +*/ + +#include "common.h" +#include <linux/vmalloc.h> +#include <linux/rtnetlink.h> + +struct backend_info { +	struct xenbus_device *dev; +	struct xenvif *vif; + +	/* This is the state that will be reflected in xenstore when any +	 * active hotplug script completes. +	 */ +	enum xenbus_state state; + +	enum xenbus_state frontend_state; +	struct xenbus_watch hotplug_status_watch; +	u8 have_hotplug_status_watch:1; +}; + +static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); +static void connect(struct backend_info *be); +static int read_xenbus_vif_flags(struct backend_info *be); +static void backend_create_xenvif(struct backend_info *be); +static void unregister_hotplug_status_watch(struct backend_info *be); +static void set_backend_state(struct backend_info *be, +			      enum xenbus_state state); + +static int netback_remove(struct xenbus_device *dev) +{ +	struct backend_info *be = dev_get_drvdata(&dev->dev); + +	set_backend_state(be, XenbusStateClosed); + +	unregister_hotplug_status_watch(be); +	if (be->vif) { +		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); +		xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); +		xenvif_free(be->vif); +		be->vif = NULL; +	} +	kfree(be); +	dev_set_drvdata(&dev->dev, NULL); +	return 0; +} + + +/** + * Entry point to this code when a new device is created.  Allocate the basic + * structures and switch to InitWait. + */ +static int netback_probe(struct xenbus_device *dev, +			 const struct xenbus_device_id *id) +{ +	const char *message; +	struct xenbus_transaction xbt; +	int err; +	int sg; +	struct backend_info *be = kzalloc(sizeof(struct backend_info), +					  GFP_KERNEL); +	if (!be) { +		xenbus_dev_fatal(dev, -ENOMEM, +				 "allocating backend structure"); +		return -ENOMEM; +	} + +	be->dev = dev; +	dev_set_drvdata(&dev->dev, be); + +	sg = 1; + +	do { +		err = xenbus_transaction_start(&xbt); +		if (err) { +			xenbus_dev_fatal(dev, err, "starting transaction"); +			goto fail; +		} + +		err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg); +		if (err) { +			message = "writing feature-sg"; +			goto abort_transaction; +		} + +		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", +				    "%d", sg); +		if (err) { +			message = "writing feature-gso-tcpv4"; +			goto abort_transaction; +		} + +		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6", +				    "%d", sg); +		if (err) { +			message = "writing feature-gso-tcpv6"; +			goto abort_transaction; +		} + +		/* We support partial checksum setup for IPv6 packets */ +		err = xenbus_printf(xbt, dev->nodename, +				    "feature-ipv6-csum-offload", +				    "%d", 1); +		if (err) { +			message = "writing feature-ipv6-csum-offload"; +			goto abort_transaction; +		} + +		/* We support rx-copy path. 
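+		 * Frontends must use it: read_xenbus_vif_flags() returns -EOPNOTSUPP if request-rx-copy is not set.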
*/ +		err = xenbus_printf(xbt, dev->nodename, +				    "feature-rx-copy", "%d", 1); +		if (err) { +			message = "writing feature-rx-copy"; +			goto abort_transaction; +		} + +		/* +		 * We don't support rx-flip path (except old guests who don't +		 * grok this feature flag). +		 */ +		err = xenbus_printf(xbt, dev->nodename, +				    "feature-rx-flip", "%d", 0); +		if (err) { +			message = "writing feature-rx-flip"; +			goto abort_transaction; +		} + +		err = xenbus_transaction_end(xbt, 0); +	} while (err == -EAGAIN); + +	if (err) { +		xenbus_dev_fatal(dev, err, "completing transaction"); +		goto fail; +	} + +	/* +	 * Split event channels support, this is optional so it is not +	 * put inside the above loop. +	 */ +	err = xenbus_printf(XBT_NIL, dev->nodename, +			    "feature-split-event-channels", +			    "%u", separate_tx_rx_irq); +	if (err) +		pr_debug("Error writing feature-split-event-channels\n"); + +	/* Multi-queue support: This is an optional feature. */ +	err = xenbus_printf(XBT_NIL, dev->nodename, +			    "multi-queue-max-queues", "%u", xenvif_max_queues); +	if (err) +		pr_debug("Error writing multi-queue-max-queues\n"); + +	err = xenbus_switch_state(dev, XenbusStateInitWait); +	if (err) +		goto fail; + +	be->state = XenbusStateInitWait; + +	/* This kicks hotplug scripts, so do it immediately. */ +	backend_create_xenvif(be); + +	return 0; + +abort_transaction: +	xenbus_transaction_end(xbt, 1); +	xenbus_dev_fatal(dev, err, "%s", message); +fail: +	pr_debug("failed\n"); +	netback_remove(dev); +	return err; +} + + +/* + * Handle the creation of the hotplug script environment.  We add the script + * and vif variables to the environment, for the benefit of the vif-* hotplug + * scripts. + */ +static int netback_uevent(struct xenbus_device *xdev, +			  struct kobj_uevent_env *env) +{ +	struct backend_info *be = dev_get_drvdata(&xdev->dev); +	char *val; + +	val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); +	if (IS_ERR(val)) { +		int err = PTR_ERR(val); +		xenbus_dev_fatal(xdev, err, "reading script"); +		return err; +	} else { +		if (add_uevent_var(env, "script=%s", val)) { +			kfree(val); +			return -ENOMEM; +		} +		kfree(val); +	} + +	if (!be || !be->vif) +		return 0; + +	return add_uevent_var(env, "vif=%s", be->vif->dev->name); +} + + +static void backend_create_xenvif(struct backend_info *be) +{ +	int err; +	long handle; +	struct xenbus_device *dev = be->dev; + +	if (be->vif != NULL) +		return; + +	err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); +	if (err != 1) { +		xenbus_dev_fatal(dev, err, "reading handle"); +		return; +	} + +	be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); +	if (IS_ERR(be->vif)) { +		err = PTR_ERR(be->vif); +		be->vif = NULL; +		xenbus_dev_fatal(dev, err, "creating interface"); +		return; +	} + +	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); +} + +static void backend_disconnect(struct backend_info *be) +{ +	if (be->vif) +		xenvif_disconnect(be->vif); +} + +static void backend_connect(struct backend_info *be) +{ +	if (be->vif) +		connect(be); +} + +static inline void backend_switch_state(struct backend_info *be, +					enum xenbus_state state) +{ +	struct xenbus_device *dev = be->dev; + +	pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state)); +	be->state = state; + +	/* If we are waiting for a hotplug script then defer the +	 * actual xenbus state change. 
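+	 * hotplug_status_changed() performs the deferred switch once the script writes "connected" to the hotplug-status node.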
+	 */ +	if (!be->have_hotplug_status_watch) +		xenbus_switch_state(dev, state); +} + +/* Handle backend state transitions: + * + * The backend state starts in InitWait and the following transitions are + * allowed. + * + * InitWait -> Connected + * + *    ^    \         | + *    |     \        | + *    |      \       | + *    |       \      | + *    |        \     | + *    |         \    | + *    |          V   V + * + *  Closed  <-> Closing + * + * The state argument specifies the eventual state of the backend and the + * function transitions to that state via the shortest path. + */ +static void set_backend_state(struct backend_info *be, +			      enum xenbus_state state) +{ +	while (be->state != state) { +		switch (be->state) { +		case XenbusStateClosed: +			switch (state) { +			case XenbusStateInitWait: +			case XenbusStateConnected: +				pr_info("%s: prepare for reconnect\n", +					be->dev->nodename); +				backend_switch_state(be, XenbusStateInitWait); +				break; +			case XenbusStateClosing: +				backend_switch_state(be, XenbusStateClosing); +				break; +			default: +				BUG(); +			} +			break; +		case XenbusStateInitWait: +			switch (state) { +			case XenbusStateConnected: +				backend_connect(be); +				backend_switch_state(be, XenbusStateConnected); +				break; +			case XenbusStateClosing: +			case XenbusStateClosed: +				backend_switch_state(be, XenbusStateClosing); +				break; +			default: +				BUG(); +			} +			break; +		case XenbusStateConnected: +			switch (state) { +			case XenbusStateInitWait: +			case XenbusStateClosing: +			case XenbusStateClosed: +				backend_disconnect(be); +				backend_switch_state(be, XenbusStateClosing); +				break; +			default: +				BUG(); +			} +			break; +		case XenbusStateClosing: +			switch (state) { +			case XenbusStateInitWait: +			case XenbusStateConnected: +			case XenbusStateClosed: +				backend_switch_state(be, XenbusStateClosed); +				break; +			default: +				BUG(); +			} +			break; +		default: +			BUG(); +		} +	} +} + +/** + * Callback received when the frontend's state changes. + */ +static void frontend_changed(struct xenbus_device *dev, +			     enum xenbus_state frontend_state) +{ +	struct backend_info *be = dev_get_drvdata(&dev->dev); + +	pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state)); + +	be->frontend_state = frontend_state; + +	switch (frontend_state) { +	case XenbusStateInitialising: +		set_backend_state(be, XenbusStateInitWait); +		break; + +	case XenbusStateInitialised: +		break; + +	case XenbusStateConnected: +		set_backend_state(be, XenbusStateConnected); +		break; + +	case XenbusStateClosing: +		set_backend_state(be, XenbusStateClosing); +		break; + +	case XenbusStateClosed: +		set_backend_state(be, XenbusStateClosed); +		if (xenbus_dev_is_online(dev)) +			break; +		/* fall through if not online */ +	case XenbusStateUnknown: +		set_backend_state(be, XenbusStateClosed); +		device_unregister(&dev->dev); +		break; + +	default: +		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend", +				 frontend_state); +		break; +	} +} + + +static void xen_net_read_rate(struct xenbus_device *dev, +			      unsigned long *bytes, unsigned long *usec) +{ +	char *s, *e; +	unsigned long b, u; +	char *ratestr; + +	/* Default to unlimited bandwidth. 
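+	 * A "rate" node of the form "<bytes>,<usec>" overrides these defaults below.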
*/ +	*bytes = ~0UL; +	*usec = 0; + +	ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL); +	if (IS_ERR(ratestr)) +		return; + +	s = ratestr; +	b = simple_strtoul(s, &e, 10); +	if ((s == e) || (*e != ',')) +		goto fail; + +	s = e + 1; +	u = simple_strtoul(s, &e, 10); +	if ((s == e) || (*e != '\0')) +		goto fail; + +	*bytes = b; +	*usec = u; + +	kfree(ratestr); +	return; + + fail: +	pr_warn("Failed to parse network rate limit. Traffic unlimited.\n"); +	kfree(ratestr); +} + +static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) +{ +	char *s, *e, *macstr; +	int i; + +	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); +	if (IS_ERR(macstr)) +		return PTR_ERR(macstr); + +	for (i = 0; i < ETH_ALEN; i++) { +		mac[i] = simple_strtoul(s, &e, 16); +		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { +			kfree(macstr); +			return -ENOENT; +		} +		s = e+1; +	} + +	kfree(macstr); +	return 0; +} + +static void unregister_hotplug_status_watch(struct backend_info *be) +{ +	if (be->have_hotplug_status_watch) { +		unregister_xenbus_watch(&be->hotplug_status_watch); +		kfree(be->hotplug_status_watch.node); +	} +	be->have_hotplug_status_watch = 0; +} + +static void hotplug_status_changed(struct xenbus_watch *watch, +				   const char **vec, +				   unsigned int vec_size) +{ +	struct backend_info *be = container_of(watch, +					       struct backend_info, +					       hotplug_status_watch); +	char *str; +	unsigned int len; + +	str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len); +	if (IS_ERR(str)) +		return; +	if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { +		/* Complete any pending state change */ +		xenbus_switch_state(be->dev, be->state); + +		/* Not interested in this watch anymore. */ +		unregister_hotplug_status_watch(be); +	} +	kfree(str); +} + +static void connect(struct backend_info *be) +{ +	int err; +	struct xenbus_device *dev = be->dev; +	unsigned long credit_bytes, credit_usec; +	unsigned int queue_index; +	unsigned int requested_num_queues; +	struct xenvif_queue *queue; + +	/* Check whether the frontend requested multiple queues +	 * and read the number requested. +	 */ +	err = xenbus_scanf(XBT_NIL, dev->otherend, +			   "multi-queue-num-queues", +			   "%u", &requested_num_queues); +	if (err < 0) { +		requested_num_queues = 1; /* Fall back to single queue */ +	} else if (requested_num_queues > xenvif_max_queues) { +		/* buggy or malicious guest */ +		xenbus_dev_fatal(dev, err, +				 "guest requested %u queues, exceeding the maximum of %u.", +				 requested_num_queues, xenvif_max_queues); +		return; +	} + +	err = xen_net_read_mac(dev, be->vif->fe_dev_addr); +	if (err) { +		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); +		return; +	} + +	xen_net_read_rate(dev, &credit_bytes, &credit_usec); +	read_xenbus_vif_flags(be); + +	/* Use the number of queues requested by the frontend */ +	be->vif->queues = vzalloc(requested_num_queues * +				  sizeof(struct xenvif_queue)); +	be->vif->num_queues = requested_num_queues; + +	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { +		queue = &be->vif->queues[queue_index]; +		queue->vif = be->vif; +		queue->id = queue_index; +		snprintf(queue->name, sizeof(queue->name), "%s-q%u", +				be->vif->dev->name, queue->id); + +		err = xenvif_init_queue(queue); +		if (err) { +			/* xenvif_init_queue() cleans up after itself on +			 * failure, but we need to clean up any previously +			 * initialised queues. 
Set num_queues to i so that +			 * earlier queues can be destroyed using the regular +			 * disconnect logic. +			 */ +			be->vif->num_queues = queue_index; +			goto err; +		} + +		queue->remaining_credit = credit_bytes; + +		err = connect_rings(be, queue); +		if (err) { +			/* connect_rings() cleans up after itself on failure, +			 * but we need to clean up after xenvif_init_queue() here, +			 * and also clean up any previously initialised queues. +			 */ +			xenvif_deinit_queue(queue); +			be->vif->num_queues = queue_index; +			goto err; +		} +	} + +	/* Initialisation completed, tell core driver the number of +	 * active queues. +	 */ +	rtnl_lock(); +	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues); +	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues); +	rtnl_unlock(); + +	xenvif_carrier_on(be->vif); + +	unregister_hotplug_status_watch(be); +	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, +				   hotplug_status_changed, +				   "%s/%s", dev->nodename, "hotplug-status"); +	if (!err) +		be->have_hotplug_status_watch = 1; + +	netif_tx_wake_all_queues(be->vif->dev); + +	return; + +err: +	if (be->vif->num_queues > 0) +		xenvif_disconnect(be->vif); /* Clean up existing queues */ +	vfree(be->vif->queues); +	be->vif->queues = NULL; +	be->vif->num_queues = 0; +	return; +} + + +static int connect_rings(struct backend_info *be, struct xenvif_queue *queue) +{ +	struct xenbus_device *dev = be->dev; +	unsigned int num_queues = queue->vif->num_queues; +	unsigned long tx_ring_ref, rx_ring_ref; +	unsigned int tx_evtchn, rx_evtchn; +	int err; +	char *xspath; +	size_t xspathsize; +	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */ + +	/* If the frontend requested 1 queue, or we have fallen back +	 * to single queue due to lack of frontend support for multi- +	 * queue, expect the remaining XenStore keys in the toplevel +	 * directory. Otherwise, expect them in a subdirectory called +	 * queue-N. +	 */ +	if (num_queues == 1) { +		xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL); +		if (!xspath) { +			xenbus_dev_fatal(dev, -ENOMEM, +					 "reading ring references"); +			return -ENOMEM; +		} +		strcpy(xspath, dev->otherend); +	} else { +		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size; +		xspath = kzalloc(xspathsize, GFP_KERNEL); +		if (!xspath) { +			xenbus_dev_fatal(dev, -ENOMEM, +					 "reading ring references"); +			return -ENOMEM; +		} +		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, +			 queue->id); +	} + +	err = xenbus_gather(XBT_NIL, xspath, +			    "tx-ring-ref", "%lu", &tx_ring_ref, +			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL); +	if (err) { +		xenbus_dev_fatal(dev, err, +				 "reading %s/ring-ref", +				 xspath); +		goto err; +	} + +	/* Try split event channels first, then single event channel. */ +	err = xenbus_gather(XBT_NIL, xspath, +			    "event-channel-tx", "%u", &tx_evtchn, +			    "event-channel-rx", "%u", &rx_evtchn, NULL); +	if (err < 0) { +		err = xenbus_scanf(XBT_NIL, xspath, +				   "event-channel", "%u", &tx_evtchn); +		if (err < 0) { +			xenbus_dev_fatal(dev, err, +					 "reading %s/event-channel(-tx/rx)", +					 xspath); +			goto err; +		} +		rx_evtchn = tx_evtchn; +	} + +	/* Map the shared frame, irq etc. 
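+	 * xenvif_connect() is handed the ring references and event channel ports gathered above.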
*/ +	err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref, +			     tx_evtchn, rx_evtchn); +	if (err) { +		xenbus_dev_fatal(dev, err, +				 "mapping shared-frames %lu/%lu port tx %u rx %u", +				 tx_ring_ref, rx_ring_ref, +				 tx_evtchn, rx_evtchn); +		goto err; +	} + +	err = 0; +err: /* Regular return falls through with err == 0 */ +	kfree(xspath); +	return err; +} + +static int read_xenbus_vif_flags(struct backend_info *be) +{ +	struct xenvif *vif = be->vif; +	struct xenbus_device *dev = be->dev; +	unsigned int rx_copy; +	int err, val; + +	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", +			   &rx_copy); +	if (err == -ENOENT) { +		err = 0; +		rx_copy = 0; +	} +	if (err < 0) { +		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy", +				 dev->otherend); +		return err; +	} +	if (!rx_copy) +		return -EOPNOTSUPP; + +	if (vif->dev->tx_queue_len != 0) { +		if (xenbus_scanf(XBT_NIL, dev->otherend, +				 "feature-rx-notify", "%d", &val) < 0) +			val = 0; +		if (val) +			vif->can_queue = 1; +		else +			/* Must be non-zero for pfifo_fast to work. */ +			vif->dev->tx_queue_len = 1; +	} + +	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", +			 "%d", &val) < 0) +		val = 0; +	vif->can_sg = !!val; + +	vif->gso_mask = 0; +	vif->gso_prefix_mask = 0; + +	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", +			 "%d", &val) < 0) +		val = 0; +	if (val) +		vif->gso_mask |= GSO_BIT(TCPV4); + +	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix", +			 "%d", &val) < 0) +		val = 0; +	if (val) +		vif->gso_prefix_mask |= GSO_BIT(TCPV4); + +	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6", +			 "%d", &val) < 0) +		val = 0; +	if (val) +		vif->gso_mask |= GSO_BIT(TCPV6); + +	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix", +			 "%d", &val) < 0) +		val = 0; +	if (val) +		vif->gso_prefix_mask |= GSO_BIT(TCPV6); + +	if (vif->gso_mask & vif->gso_prefix_mask) { +		xenbus_dev_fatal(dev, err, +				 "%s: gso and gso prefix flags are not " +				 "mutually exclusive", +				 dev->otherend); +		return -EOPNOTSUPP; +	} + +	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", +			 "%d", &val) < 0) +		val = 0; +	vif->ip_csum = !val; + +	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload", +			 "%d", &val) < 0) +		val = 0; +	vif->ipv6_csum = !!val; + +	return 0; +} + + +/* ** Driver Registration ** */ + + +static const struct xenbus_device_id netback_ids[] = { +	{ "vif" }, +	{ "" } +}; + + +static DEFINE_XENBUS_DRIVER(netback, , +	.probe = netback_probe, +	.remove = netback_remove, +	.uevent = netback_uevent, +	.otherend_changed = frontend_changed, +); + +int xenvif_xenbus_init(void) +{ +	return xenbus_register_backend(&netback_driver); +} + +void xenvif_xenbus_fini(void) +{ +	return xenbus_unregister_driver(&netback_driver); +}  | 
