Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	1666
1 file changed, 1028 insertions, 638 deletions
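A recurring piece of the zerocopy bookkeeping added below is ubuf_to_queue(): two container_of() steps take a struct ubuf_info embedded in pending_tx_info[pending_idx] back to the xenvif_queue that owns the whole array, and xenvif_zerocopy_callback() relies on the same recovery to reach the right queue's dealloc ring. The following is a minimal userspace sketch of that pointer arithmetic only; the struct layouts and the MAX_PENDING_REQS value are simplified stand-ins, not the kernel definitions.

/*
 * Minimal userspace model of the ubuf_to_queue() recovery used by the
 * zerocopy paths in this patch: each queue embeds an array of
 * pending_tx_info, each of which embeds a callback_struct (ubuf_info).
 * Given only a pointer to one callback_struct plus its index (ubuf->desc),
 * two container_of() steps recover the owning queue.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ubuf_info {
	unsigned short desc;		/* holds pending_idx in netback */
};

struct pending_tx_info {
	int req;			/* placeholder for the stored tx request */
	struct ubuf_info callback_struct;
};

#define MAX_PENDING_REQS 8		/* reduced here; the kernel uses a larger ring */

struct xenvif_queue {
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
};

static struct xenvif_queue *ubuf_to_queue(struct ubuf_info *ubuf)
{
	unsigned short pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);

	/* Step back pending_idx slots to element 0 of the array, then up to
	 * the queue that embeds it.
	 */
	return container_of(temp - pending_idx,
			    struct xenvif_queue, pending_tx_info[0]);
}

int main(void)
{
	static struct xenvif_queue queue;
	struct ubuf_info *ubuf = &queue.pending_tx_info[5].callback_struct;

	ubuf->desc = 5;
	printf("recovered %p, expected %p\n",
	       (void *)ubuf_to_queue(ubuf), (void *)&queue);
	return 0;
}

Because the callback only ever receives the ubuf pointer from the network stack, keeping the index in ubuf->desc is what makes this two-step container_of() walk possible without any extra lookup table.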
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 956130c7003..c65b636bcab 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -37,6 +37,7 @@  #include <linux/kthread.h>  #include <linux/if_vlan.h>  #include <linux/udp.h> +#include <linux/highmem.h>  #include <net/tcp.h> @@ -54,6 +55,18 @@  bool separate_tx_rx_irq = 1;  module_param(separate_tx_rx_irq, bool, 0644); +/* When guest ring is filled up, qdisc queues the packets for us, but we have + * to timeout them, otherwise other guests' packets can get stuck there + */ +unsigned int rx_drain_timeout_msecs = 10000; +module_param(rx_drain_timeout_msecs, uint, 0444); +unsigned int rx_drain_timeout_jiffies; + +unsigned int xenvif_max_queues; +module_param_named(max_queues, xenvif_max_queues, uint, 0644); +MODULE_PARM_DESC(max_queues, +		 "Maximum number of queues per virtual interface"); +  /*   * This is the maximum slots a skb can have. If a guest sends a skb   * which exceeds this limit it is considered malicious. @@ -62,62 +75,56 @@ module_param(separate_tx_rx_irq, bool, 0644);  static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;  module_param(fatal_skb_slots, uint, 0444); -/* - * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating - * the maximum slots a valid packet can use. Now this value is defined - * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by - * all backend. - */ -#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN - -/* - * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of - * one or more merged tx requests, otherwise it is the continuation of - * previous tx request. - */ -static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx) -{ -	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX; -} - -static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, +static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,  			       u8 status); -static void make_tx_response(struct xenvif *vif, +static void make_tx_response(struct xenvif_queue *queue,  			     struct xen_netif_tx_request *txp,  			     s8       st); -static inline int tx_work_todo(struct xenvif *vif); -static inline int rx_work_todo(struct xenvif *vif); +static inline int tx_work_todo(struct xenvif_queue *queue); +static inline int rx_work_todo(struct xenvif_queue *queue); -static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, +static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,  					     u16      id,  					     s8       st,  					     u16      offset,  					     u16      size,  					     u16      flags); -static inline unsigned long idx_to_pfn(struct xenvif *vif, +static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,  				       u16 idx)  { -	return page_to_pfn(vif->mmap_pages[idx]); +	return page_to_pfn(queue->mmap_pages[idx]);  } -static inline unsigned long idx_to_kaddr(struct xenvif *vif, +static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,  					 u16 idx)  { -	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); +	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));  } -/* - * This is the amount of packet we copy rather than map, so that the - * guest can't fiddle with the contents of the headers while we do - * packet processing on them (netfilter, routing, etc). 
+#define callback_param(vif, pending_idx) \ +	(vif->pending_tx_info[pending_idx].callback_struct) + +/* Find the containing VIF's structure from a pointer in pending_tx_info array   */ -#define PKT_PROT_LEN    (ETH_HLEN + \ -			 VLAN_HLEN + \ -			 sizeof(struct iphdr) + MAX_IPOPTLEN + \ -			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE) +static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf) +{ +	u16 pending_idx = ubuf->desc; +	struct pending_tx_info *temp = +		container_of(ubuf, struct pending_tx_info, callback_struct); +	return container_of(temp - pending_idx, +			    struct xenvif_queue, +			    pending_tx_info[0]); +} + +/* This is a miniumum size for the linear area to avoid lots of + * calls to __pskb_pull_tail() as we set up checksum offsets. The + * value 128 was chosen as it covers all IPv4 and most likely + * IPv6 headers. + */ +#define PKT_PROT_LEN 128  static u16 frag_get_pending_idx(skb_frag_t *frag)  { @@ -134,42 +141,26 @@ static inline pending_ring_idx_t pending_index(unsigned i)  	return i & (MAX_PENDING_REQS-1);  } -static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif) -{ -	return MAX_PENDING_REQS - -		vif->pending_prod + vif->pending_cons; -} - -static int max_required_rx_slots(struct xenvif *vif) +bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)  { -	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); - -	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ -	if (vif->can_sg || vif->gso || vif->gso_prefix) -		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ +	RING_IDX prod, cons; -	return max; -} - -int xenvif_rx_ring_full(struct xenvif *vif) -{ -	RING_IDX peek   = vif->rx_req_cons_peek; -	RING_IDX needed = max_required_rx_slots(vif); +	do { +		prod = queue->rx.sring->req_prod; +		cons = queue->rx.req_cons; -	return ((vif->rx.sring->req_prod - peek) < needed) || -	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed); -} +		if (prod - cons >= needed) +			return true; -int xenvif_must_stop_queue(struct xenvif *vif) -{ -	if (!xenvif_rx_ring_full(vif)) -		return 0; +		queue->rx.sring->req_event = prod + 1; -	vif->rx.sring->req_event = vif->rx_req_cons_peek + -		max_required_rx_slots(vif); -	mb(); /* request notification /then/ check the queue */ +		/* Make sure event is visible before we check prod +		 * again. +		 */ +		mb(); +	} while (queue->rx.sring->req_prod != prod); -	return xenvif_rx_ring_full(vif); +	return false;  }  /* @@ -177,7 +168,8 @@ int xenvif_must_stop_queue(struct xenvif *vif)   * adding 'size' bytes to a buffer which currently contains 'offset'   * bytes.   */ -static bool start_new_rx_buffer(int offset, unsigned long size, int head) +static bool start_new_rx_buffer(int offset, unsigned long size, int head, +				bool full_coalesce)  {  	/* simple case: we have completely filled the current buffer. */  	if (offset == MAX_BUFFER_OFFSET) @@ -189,6 +181,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)  	 *     (i)   this frag would fit completely in the next buffer  	 * and (ii)  there is already some data in the current buffer  	 * and (iii) this is not the head buffer. +	 * and (iv)  there is no need to fully utilize the buffers  	 *  	 * Where:  	 * - (i) stops us splitting a frag into two copies @@ -199,72 +192,22 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)  	 *   by (ii) but is explicitly checked because  	 *   netfront relies on the first buffer being  	 *   non-empty and can crash otherwise. 
+	 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS +	 *   slot  	 *  	 * This means we will effectively linearise small  	 * frags but do not needlessly split large buffers  	 * into multiple copies tend to give large frags their  	 * own buffers as before.  	 */ -	if ((offset + size > MAX_BUFFER_OFFSET) && -	    (size <= MAX_BUFFER_OFFSET) && offset && !head) +	BUG_ON(size > MAX_BUFFER_OFFSET); +	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head && +	    !full_coalesce)  		return true;  	return false;  } -/* - * Figure out how many ring slots we're going to need to send @skb to - * the guest. This function is essentially a dry run of - * xenvif_gop_frag_copy. - */ -unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) -{ -	unsigned int count; -	int i, copy_off; - -	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE); - -	copy_off = skb_headlen(skb) % PAGE_SIZE; - -	if (skb_shinfo(skb)->gso_size) -		count++; - -	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { -		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); -		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; -		unsigned long bytes; - -		offset &= ~PAGE_MASK; - -		while (size > 0) { -			BUG_ON(offset >= PAGE_SIZE); -			BUG_ON(copy_off > MAX_BUFFER_OFFSET); - -			bytes = PAGE_SIZE - offset; - -			if (bytes > size) -				bytes = size; - -			if (start_new_rx_buffer(copy_off, bytes, 0)) { -				count++; -				copy_off = 0; -			} - -			if (copy_off + bytes > MAX_BUFFER_OFFSET) -				bytes = MAX_BUFFER_OFFSET - copy_off; - -			copy_off += bytes; - -			offset += bytes; -			size -= bytes; - -			if (offset == PAGE_SIZE) -				offset = 0; -		} -	} -	return count; -} -  struct netrx_pending_operations {  	unsigned copy_prod, copy_cons;  	unsigned meta_prod, meta_cons; @@ -274,15 +217,16 @@ struct netrx_pending_operations {  	grant_ref_t copy_gref;  }; -static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, +static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,  						 struct netrx_pending_operations *npo)  {  	struct xenvif_rx_meta *meta;  	struct xen_netif_rx_request *req; -	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); +	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);  	meta = npo->meta + npo->meta_prod++; +	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;  	meta->gso_size = 0;  	meta->size = 0;  	meta->id = req->id; @@ -293,18 +237,28 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,  	return meta;  } +struct xenvif_rx_cb { +	int meta_slots_used; +	bool full_coalesce; +}; + +#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) +  /*   * Set up the grant operations for this fragment. If it's a flipping   * interface, we also set up the unmap request from here.   */ -static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, +static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,  				 struct netrx_pending_operations *npo,  				 struct page *page, unsigned long size, -				 unsigned long offset, int *head) +				 unsigned long offset, int *head, +				 struct xenvif_queue *foreign_queue, +				 grant_ref_t foreign_gref)  {  	struct gnttab_copy *copy_gop;  	struct xenvif_rx_meta *meta;  	unsigned long bytes; +	int gso_type = XEN_NETIF_GSO_TYPE_NONE;  	/* Data must not cross a page boundary. 
*/  	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); @@ -324,14 +278,17 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,  		if (bytes > size)  			bytes = size; -		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { +		if (start_new_rx_buffer(npo->copy_off, +					bytes, +					*head, +					XENVIF_RX_CB(skb)->full_coalesce)) {  			/*  			 * Netfront requires there to be some data in the head  			 * buffer.  			 */  			BUG_ON(*head); -			meta = get_next_rx_buffer(vif, npo); +			meta = get_next_rx_buffer(queue, npo);  		}  		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) @@ -341,11 +298,18 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,  		copy_gop->flags = GNTCOPY_dest_gref;  		copy_gop->len = bytes; -		copy_gop->source.domid = DOMID_SELF; -		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page)); +		if (foreign_queue) { +			copy_gop->source.domid = foreign_queue->vif->domid; +			copy_gop->source.u.ref = foreign_gref; +			copy_gop->flags |= GNTCOPY_source_gref; +		} else { +			copy_gop->source.domid = DOMID_SELF; +			copy_gop->source.u.gmfn = +				virt_to_mfn(page_address(page)); +		}  		copy_gop->source.offset = offset; -		copy_gop->dest.domid = vif->domid; +		copy_gop->dest.domid = queue->vif->domid;  		copy_gop->dest.offset = npo->copy_off;  		copy_gop->dest.u.ref = npo->copy_gref; @@ -363,8 +327,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,  		}  		/* Leave a gap for the GSO descriptor. */ -		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) -			vif->rx.req_cons++; +		if (skb_is_gso(skb)) { +			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) +				gso_type = XEN_NETIF_GSO_TYPE_TCPV4; +			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) +				gso_type = XEN_NETIF_GSO_TYPE_TCPV6; +		} + +		if (*head && ((1 << gso_type) & queue->vif->gso_mask)) +			queue->rx.req_cons++;  		*head = 0; /* There must be something in this buffer now. */ @@ -372,6 +343,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,  }  /* + * Find the grant ref for a given frag in a chain of struct ubuf_info's + * skb: the skb itself + * i: the frag's number + * ubuf: a pointer to an element in the chain. It should not be NULL + * + * Returns a pointer to the element in the chain where the page were found. If + * not found, returns NULL. + * See the definition of callback_struct in common.h for more details about + * the chain. + */ +static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb, +						const int i, +						const struct ubuf_info *ubuf) +{ +	struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf); + +	do { +		u16 pending_idx = ubuf->desc; + +		if (skb_shinfo(skb)->frags[i].page.p == +		    foreign_queue->mmap_pages[pending_idx]) +			break; +		ubuf = (struct ubuf_info *) ubuf->ctx; +	} while (ubuf); + +	return ubuf; +} + +/*   * Prepare an SKB to be transmitted to the frontend.   *   * This function is responsible for allocating grant operations, meta @@ -384,7 +384,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,   * frontend-side LRO).   
*/  static int xenvif_gop_skb(struct sk_buff *skb, -			  struct netrx_pending_operations *npo) +			  struct netrx_pending_operations *npo, +			  struct xenvif_queue *queue)  {  	struct xenvif *vif = netdev_priv(skb->dev);  	int nr_frags = skb_shinfo(skb)->nr_frags; @@ -394,25 +395,40 @@ static int xenvif_gop_skb(struct sk_buff *skb,  	unsigned char *data;  	int head = 1;  	int old_meta_prod; +	int gso_type; +	const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; +	const struct ubuf_info *const head_ubuf = ubuf;  	old_meta_prod = npo->meta_prod; +	gso_type = XEN_NETIF_GSO_TYPE_NONE; +	if (skb_is_gso(skb)) { +		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) +			gso_type = XEN_NETIF_GSO_TYPE_TCPV4; +		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) +			gso_type = XEN_NETIF_GSO_TYPE_TCPV6; +	} +  	/* Set up a GSO prefix descriptor, if necessary */ -	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) { -		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); +	if ((1 << gso_type) & vif->gso_prefix_mask) { +		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);  		meta = npo->meta + npo->meta_prod++; +		meta->gso_type = gso_type;  		meta->gso_size = skb_shinfo(skb)->gso_size;  		meta->size = 0;  		meta->id = req->id;  	} -	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); +	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);  	meta = npo->meta + npo->meta_prod++; -	if (!vif->gso_prefix) +	if ((1 << gso_type) & vif->gso_mask) { +		meta->gso_type = gso_type;  		meta->gso_size = skb_shinfo(skb)->gso_size; -	else +	} else { +		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;  		meta->gso_size = 0; +	}  	meta->size = 0;  	meta->id = req->id; @@ -427,17 +443,69 @@ static int xenvif_gop_skb(struct sk_buff *skb,  		if (data + len > skb_tail_pointer(skb))  			len = skb_tail_pointer(skb) - data; -		xenvif_gop_frag_copy(vif, skb, npo, -				     virt_to_page(data), len, offset, &head); +		xenvif_gop_frag_copy(queue, skb, npo, +				     virt_to_page(data), len, offset, &head, +				     NULL, +				     0);  		data += len;  	}  	for (i = 0; i < nr_frags; i++) { -		xenvif_gop_frag_copy(vif, skb, npo, +		/* This variable also signals whether foreign_gref has a real +		 * value or not. +		 */ +		struct xenvif_queue *foreign_queue = NULL; +		grant_ref_t foreign_gref; + +		if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && +			(ubuf->callback == &xenvif_zerocopy_callback)) { +			const struct ubuf_info *const startpoint = ubuf; + +			/* Ideally ubuf points to the chain element which +			 * belongs to this frag. Or if frags were removed from +			 * the beginning, then shortly before it. +			 */ +			ubuf = xenvif_find_gref(skb, i, ubuf); + +			/* Try again from the beginning of the list, if we +			 * haven't tried from there. This only makes sense in +			 * the unlikely event of reordering the original frags. +			 * For injected local pages it's an unnecessary second +			 * run. +			 */ +			if (unlikely(!ubuf) && startpoint != head_ubuf) +				ubuf = xenvif_find_gref(skb, i, head_ubuf); + +			if (likely(ubuf)) { +				u16 pending_idx = ubuf->desc; + +				foreign_queue = ubuf_to_queue(ubuf); +				foreign_gref = +					foreign_queue->pending_tx_info[pending_idx].req.gref; +				/* Just a safety measure. If this was the last +				 * element on the list, the for loop will +				 * iterate again if a local page were added to +				 * the end. Using head_ubuf here prevents the +				 * second search on the chain. Or the original +				 * frags changed order, but that's less likely. 
+				 * In any way, ubuf shouldn't be NULL. +				 */ +				ubuf = ubuf->ctx ? +					(struct ubuf_info *) ubuf->ctx : +					head_ubuf; +			} else +				/* This frag was a local page, added to the +				 * array after the skb left netback. +				 */ +				ubuf = head_ubuf; +		} +		xenvif_gop_frag_copy(queue, skb, npo,  				     skb_frag_page(&skb_shinfo(skb)->frags[i]),  				     skb_frag_size(&skb_shinfo(skb)->frags[i]),  				     skb_shinfo(skb)->frags[i].page_offset, -				     &head); +				     &head, +				     foreign_queue, +				     foreign_queue ? foreign_gref : UINT_MAX);  	}  	return npo->meta_prod - old_meta_prod; @@ -469,7 +537,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,  	return status;  } -static void xenvif_add_frag_responses(struct xenvif *vif, int status, +static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,  				      struct xenvif_rx_meta *meta,  				      int nr_meta_slots)  { @@ -490,21 +558,17 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,  			flags = XEN_NETRXF_more_data;  		offset = 0; -		make_rx_response(vif, meta[i].id, status, offset, +		make_rx_response(queue, meta[i].id, status, offset,  				 meta[i].size, flags);  	}  } -struct skb_cb_overlay { -	int meta_slots_used; -}; - -static void xenvif_kick_thread(struct xenvif *vif) +void xenvif_kick_thread(struct xenvif_queue *queue)  { -	wake_up(&vif->wq); +	wake_up(&queue->wq);  } -void xenvif_rx_action(struct xenvif *vif) +static void xenvif_rx_action(struct xenvif_queue *queue)  {  	s8 status;  	u16 flags; @@ -513,72 +577,115 @@ void xenvif_rx_action(struct xenvif *vif)  	struct sk_buff *skb;  	LIST_HEAD(notify);  	int ret; -	int nr_frags; -	int count;  	unsigned long offset; -	struct skb_cb_overlay *sco; -	int need_to_notify = 0; +	bool need_to_notify = false;  	struct netrx_pending_operations npo = { -		.copy  = vif->grant_copy_op, -		.meta  = vif->meta, +		.copy  = queue->grant_copy_op, +		.meta  = queue->meta,  	};  	skb_queue_head_init(&rxq); -	count = 0; +	while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) { +		RING_IDX max_slots_needed; +		RING_IDX old_req_cons; +		RING_IDX ring_slots_used; +		int i; -	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { -		vif = netdev_priv(skb->dev); -		nr_frags = skb_shinfo(skb)->nr_frags; +		/* We need a cheap worse case estimate for the number of +		 * slots we'll use. +		 */ -		sco = (struct skb_cb_overlay *)skb->cb; -		sco->meta_slots_used = xenvif_gop_skb(skb, &npo); +		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) + +						skb_headlen(skb), +						PAGE_SIZE); +		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { +			unsigned int size; +			unsigned int offset; -		count += nr_frags + 1; +			size = skb_frag_size(&skb_shinfo(skb)->frags[i]); +			offset = skb_shinfo(skb)->frags[i].page_offset; -		__skb_queue_tail(&rxq, skb); +			/* For a worse-case estimate we need to factor in +			 * the fragment page offset as this will affect the +			 * number of times xenvif_gop_frag_copy() will +			 * call start_new_rx_buffer(). +			 */ +			max_slots_needed += DIV_ROUND_UP(offset + size, +							 PAGE_SIZE); +		} -		/* Filled the batch queue? */ -		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ -		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE) +		/* To avoid the estimate becoming too pessimal for some +		 * frontends that limit posted rx requests, cap the estimate +		 * at MAX_SKB_FRAGS. In this case netback will fully coalesce +		 * the skb into the provided slots. 
+		 */ +		if (max_slots_needed > MAX_SKB_FRAGS) { +			max_slots_needed = MAX_SKB_FRAGS; +			XENVIF_RX_CB(skb)->full_coalesce = true; +		} else { +			XENVIF_RX_CB(skb)->full_coalesce = false; +		} + +		/* We may need one more slot for GSO metadata */ +		if (skb_is_gso(skb) && +		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || +		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) +			max_slots_needed++; + +		/* If the skb may not fit then bail out now */ +		if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) { +			skb_queue_head(&queue->rx_queue, skb); +			need_to_notify = true; +			queue->rx_last_skb_slots = max_slots_needed;  			break; +		} else +			queue->rx_last_skb_slots = 0; + +		old_req_cons = queue->rx.req_cons; +		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); +		ring_slots_used = queue->rx.req_cons - old_req_cons; + +		BUG_ON(ring_slots_used > max_slots_needed); + +		__skb_queue_tail(&rxq, skb);  	} -	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); +	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));  	if (!npo.copy_prod) -		return; +		goto done; -	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); -	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); +	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); +	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);  	while ((skb = __skb_dequeue(&rxq)) != NULL) { -		sco = (struct skb_cb_overlay *)skb->cb; - -		vif = netdev_priv(skb->dev); -		if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) { -			resp = RING_GET_RESPONSE(&vif->rx, -						 vif->rx.rsp_prod_pvt++); +		if ((1 << queue->meta[npo.meta_cons].gso_type) & +		    queue->vif->gso_prefix_mask) { +			resp = RING_GET_RESPONSE(&queue->rx, +						 queue->rx.rsp_prod_pvt++);  			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; -			resp->offset = vif->meta[npo.meta_cons].gso_size; -			resp->id = vif->meta[npo.meta_cons].id; -			resp->status = sco->meta_slots_used; +			resp->offset = queue->meta[npo.meta_cons].gso_size; +			resp->id = queue->meta[npo.meta_cons].id; +			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;  			npo.meta_cons++; -			sco->meta_slots_used--; +			XENVIF_RX_CB(skb)->meta_slots_used--;  		} -		vif->dev->stats.tx_bytes += skb->len; -		vif->dev->stats.tx_packets++; +		queue->stats.tx_bytes += skb->len; +		queue->stats.tx_packets++; -		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo); +		status = xenvif_check_gop(queue->vif, +					  XENVIF_RX_CB(skb)->meta_slots_used, +					  &npo); -		if (sco->meta_slots_used == 1) +		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)  			flags = 0;  		else  			flags = XEN_NETRXF_more_data; @@ -590,21 +697,22 @@ void xenvif_rx_action(struct xenvif *vif)  			flags |= XEN_NETRXF_data_validated;  		offset = 0; -		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id, +		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,  					status, offset, -					vif->meta[npo.meta_cons].size, +					queue->meta[npo.meta_cons].size,  					flags); -		if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { +		if ((1 << queue->meta[npo.meta_cons].gso_type) & +		    queue->vif->gso_mask) {  			struct xen_netif_extra_info *gso =  				(struct xen_netif_extra_info *) -				RING_GET_RESPONSE(&vif->rx, -						  vif->rx.rsp_prod_pvt++); +				RING_GET_RESPONSE(&queue->rx, +						  queue->rx.rsp_prod_pvt++);  			resp->flags |= XEN_NETRXF_extra_info; -			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; -			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; +			gso->u.gso.type = 
queue->meta[npo.meta_cons].gso_type; +			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;  			gso->u.gso.pad = 0;  			gso->u.gso.features = 0; @@ -612,47 +720,34 @@ void xenvif_rx_action(struct xenvif *vif)  			gso->flags = 0;  		} -		xenvif_add_frag_responses(vif, status, -					  vif->meta + npo.meta_cons + 1, -					  sco->meta_slots_used); +		xenvif_add_frag_responses(queue, status, +					  queue->meta + npo.meta_cons + 1, +					  XENVIF_RX_CB(skb)->meta_slots_used); -		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); +		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret); -		if (ret) -			need_to_notify = 1; +		need_to_notify |= !!ret; -		xenvif_notify_tx_completion(vif); - -		npo.meta_cons += sco->meta_slots_used; +		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;  		dev_kfree_skb(skb);  	} +done:  	if (need_to_notify) -		notify_remote_via_irq(vif->rx_irq); - -	/* More work to do? */ -	if (!skb_queue_empty(&vif->rx_queue)) -		xenvif_kick_thread(vif); +		notify_remote_via_irq(queue->rx_irq);  } -void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) -{ -	skb_queue_tail(&vif->rx_queue, skb); - -	xenvif_kick_thread(vif); -} - -void xenvif_check_rx_xenvif(struct xenvif *vif) +void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)  {  	int more_to_do; -	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); +	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);  	if (more_to_do) -		napi_schedule(&vif->napi); +		napi_schedule(&queue->napi);  } -static void tx_add_credit(struct xenvif *vif) +static void tx_add_credit(struct xenvif_queue *queue)  {  	unsigned long max_burst, max_credit; @@ -660,51 +755,57 @@ static void tx_add_credit(struct xenvif *vif)  	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.  	 * Otherwise the interface can seize up due to insufficient credit.  	 */ -	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; +	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;  	max_burst = min(max_burst, 131072UL); -	max_burst = max(max_burst, vif->credit_bytes); +	max_burst = max(max_burst, queue->credit_bytes);  	/* Take care that adding a new chunk of credit doesn't wrap to zero. 
*/ -	max_credit = vif->remaining_credit + vif->credit_bytes; -	if (max_credit < vif->remaining_credit) +	max_credit = queue->remaining_credit + queue->credit_bytes; +	if (max_credit < queue->remaining_credit)  		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ -	vif->remaining_credit = min(max_credit, max_burst); +	queue->remaining_credit = min(max_credit, max_burst);  }  static void tx_credit_callback(unsigned long data)  { -	struct xenvif *vif = (struct xenvif *)data; -	tx_add_credit(vif); -	xenvif_check_rx_xenvif(vif); +	struct xenvif_queue *queue = (struct xenvif_queue *)data; +	tx_add_credit(queue); +	xenvif_napi_schedule_or_enable_events(queue);  } -static void xenvif_tx_err(struct xenvif *vif, +static void xenvif_tx_err(struct xenvif_queue *queue,  			  struct xen_netif_tx_request *txp, RING_IDX end)  { -	RING_IDX cons = vif->tx.req_cons; +	RING_IDX cons = queue->tx.req_cons; +	unsigned long flags;  	do { -		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); +		spin_lock_irqsave(&queue->response_lock, flags); +		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); +		spin_unlock_irqrestore(&queue->response_lock, flags);  		if (cons == end)  			break; -		txp = RING_GET_REQUEST(&vif->tx, cons++); +		txp = RING_GET_REQUEST(&queue->tx, cons++);  	} while (1); -	vif->tx.req_cons = cons; +	queue->tx.req_cons = cons;  }  static void xenvif_fatal_tx_err(struct xenvif *vif)  {  	netdev_err(vif->dev, "fatal error; disabling device\n"); -	xenvif_carrier_off(vif); +	vif->disabled = true; +	/* Disable the vif from queue 0's kthread */ +	if (vif->queues) +		xenvif_kick_thread(&vif->queues[0]);  } -static int xenvif_count_requests(struct xenvif *vif, +static int xenvif_count_requests(struct xenvif_queue *queue,  				 struct xen_netif_tx_request *first,  				 struct xen_netif_tx_request *txp,  				 int work_to_do)  { -	RING_IDX cons = vif->tx.req_cons; +	RING_IDX cons = queue->tx.req_cons;  	int slots = 0;  	int drop_err = 0;  	int more_data; @@ -716,10 +817,10 @@ static int xenvif_count_requests(struct xenvif *vif,  		struct xen_netif_tx_request dropped_tx = { 0 };  		if (slots >= work_to_do) { -			netdev_err(vif->dev, +			netdev_err(queue->vif->dev,  				   "Asked for %d slots but exceeds this limit\n",  				   work_to_do); -			xenvif_fatal_tx_err(vif); +			xenvif_fatal_tx_err(queue->vif);  			return -ENODATA;  		} @@ -727,10 +828,10 @@ static int xenvif_count_requests(struct xenvif *vif,  		 * considered malicious.  		 
*/  		if (unlikely(slots >= fatal_skb_slots)) { -			netdev_err(vif->dev, +			netdev_err(queue->vif->dev,  				   "Malicious frontend using %d slots, threshold %u\n",  				   slots, fatal_skb_slots); -			xenvif_fatal_tx_err(vif); +			xenvif_fatal_tx_err(queue->vif);  			return -E2BIG;  		} @@ -743,7 +844,7 @@ static int xenvif_count_requests(struct xenvif *vif,  		 */  		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {  			if (net_ratelimit()) -				netdev_dbg(vif->dev, +				netdev_dbg(queue->vif->dev,  					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",  					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);  			drop_err = -E2BIG; @@ -752,7 +853,7 @@ static int xenvif_count_requests(struct xenvif *vif,  		if (drop_err)  			txp = &dropped_tx; -		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots), +		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),  		       sizeof(*txp));  		/* If the guest submitted a frame >= 64 KiB then @@ -766,7 +867,7 @@ static int xenvif_count_requests(struct xenvif *vif,  		 */  		if (!drop_err && txp->size > first->size) {  			if (net_ratelimit()) -				netdev_dbg(vif->dev, +				netdev_dbg(queue->vif->dev,  					   "Invalid tx request, slot size %u > remaining size %u\n",  					   txp->size, first->size);  			drop_err = -EIO; @@ -776,9 +877,9 @@ static int xenvif_count_requests(struct xenvif *vif,  		slots++;  		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { -			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", +			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",  				 txp->offset, txp->size); -			xenvif_fatal_tx_err(vif); +			xenvif_fatal_tx_err(queue->vif);  			return -EINVAL;  		} @@ -790,219 +891,269 @@ static int xenvif_count_requests(struct xenvif *vif,  	} while (more_data);  	if (drop_err) { -		xenvif_tx_err(vif, first, cons + slots); +		xenvif_tx_err(queue, first, cons + slots);  		return drop_err;  	}  	return slots;  } -static struct page *xenvif_alloc_page(struct xenvif *vif, -				      u16 pending_idx) + +struct xenvif_tx_cb { +	u16 pending_idx; +}; + +#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) + +static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, +					  u16 pending_idx, +					  struct xen_netif_tx_request *txp, +					  struct gnttab_map_grant_ref *mop)  { -	struct page *page; +	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; +	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx), +			  GNTMAP_host_map | GNTMAP_readonly, +			  txp->gref, queue->vif->domid); -	page = alloc_page(GFP_ATOMIC|__GFP_COLD); -	if (!page) +	memcpy(&queue->pending_tx_info[pending_idx].req, txp, +	       sizeof(*txp)); +} + +static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) +{ +	struct sk_buff *skb = +		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN, +			  GFP_ATOMIC | __GFP_NOWARN); +	if (unlikely(skb == NULL))  		return NULL; -	vif->mmap_pages[pending_idx] = page; -	return page; +	/* Packets passed to netif_rx() must have some headroom. 
*/ +	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + +	/* Initialize it here to avoid later surprises */ +	skb_shinfo(skb)->destructor_arg = NULL; + +	return skb;  } -static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif, -					       struct sk_buff *skb, -					       struct xen_netif_tx_request *txp, -					       struct gnttab_copy *gop) +static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, +							struct sk_buff *skb, +							struct xen_netif_tx_request *txp, +							struct gnttab_map_grant_ref *gop)  {  	struct skb_shared_info *shinfo = skb_shinfo(skb);  	skb_frag_t *frags = shinfo->frags; -	u16 pending_idx = *((u16 *)skb->data); -	u16 head_idx = 0; -	int slot, start; -	struct page *page; -	pending_ring_idx_t index, start_idx = 0; -	uint16_t dst_offset; -	unsigned int nr_slots; -	struct pending_tx_info *first = NULL; +	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; +	int start; +	pending_ring_idx_t index; +	unsigned int nr_slots, frag_overflow = 0;  	/* At this point shinfo->nr_frags is in fact the number of  	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.  	 */ +	if (shinfo->nr_frags > MAX_SKB_FRAGS) { +		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS; +		BUG_ON(frag_overflow > MAX_SKB_FRAGS); +		shinfo->nr_frags = MAX_SKB_FRAGS; +	}  	nr_slots = shinfo->nr_frags;  	/* Skip first skb fragment if it is on same page as header fragment. */  	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); -	/* Coalesce tx requests, at this point the packet passed in -	 * should be <= 64K. Any packets larger than 64K have been -	 * handled in xenvif_count_requests(). -	 */ -	for (shinfo->nr_frags = slot = start; slot < nr_slots; -	     shinfo->nr_frags++) { -		struct pending_tx_info *pending_tx_info = -			vif->pending_tx_info; - -		page = alloc_page(GFP_ATOMIC|__GFP_COLD); -		if (!page) -			goto err; - -		dst_offset = 0; -		first = NULL; -		while (dst_offset < PAGE_SIZE && slot < nr_slots) { -			gop->flags = GNTCOPY_source_gref; - -			gop->source.u.ref = txp->gref; -			gop->source.domid = vif->domid; -			gop->source.offset = txp->offset; - -			gop->dest.domid = DOMID_SELF; - -			gop->dest.offset = dst_offset; -			gop->dest.u.gmfn = virt_to_mfn(page_address(page)); - -			if (dst_offset + txp->size > PAGE_SIZE) { -				/* This page can only merge a portion -				 * of tx request. Do not increment any -				 * pointer / counter here. The txp -				 * will be dealt with in future -				 * rounds, eventually hitting the -				 * `else` branch. 
-				 */ -				gop->len = PAGE_SIZE - dst_offset; -				txp->offset += gop->len; -				txp->size -= gop->len; -				dst_offset += gop->len; /* quit loop */ -			} else { -				/* This tx request can be merged in the page */ -				gop->len = txp->size; -				dst_offset += gop->len; - -				index = pending_index(vif->pending_cons++); - -				pending_idx = vif->pending_ring[index]; +	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; +	     shinfo->nr_frags++, txp++, gop++) { +		index = pending_index(queue->pending_cons++); +		pending_idx = queue->pending_ring[index]; +		xenvif_tx_create_map_op(queue, pending_idx, txp, gop); +		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); +	} -				memcpy(&pending_tx_info[pending_idx].req, txp, -				       sizeof(*txp)); +	if (frag_overflow) { +		struct sk_buff *nskb = xenvif_alloc_skb(0); +		if (unlikely(nskb == NULL)) { +			if (net_ratelimit()) +				netdev_err(queue->vif->dev, +					   "Can't allocate the frag_list skb.\n"); +			return NULL; +		} -				/* Poison these fields, corresponding -				 * fields for head tx req will be set -				 * to correct values after the loop. -				 */ -				vif->mmap_pages[pending_idx] = (void *)(~0UL); -				pending_tx_info[pending_idx].head = -					INVALID_PENDING_RING_IDX; - -				if (!first) { -					first = &pending_tx_info[pending_idx]; -					start_idx = index; -					head_idx = pending_idx; -				} - -				txp++; -				slot++; -			} +		shinfo = skb_shinfo(nskb); +		frags = shinfo->frags; -			gop++; +		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; +		     shinfo->nr_frags++, txp++, gop++) { +			index = pending_index(queue->pending_cons++); +			pending_idx = queue->pending_ring[index]; +			xenvif_tx_create_map_op(queue, pending_idx, txp, gop); +			frag_set_pending_idx(&frags[shinfo->nr_frags], +					     pending_idx);  		} -		first->req.offset = 0; -		first->req.size = dst_offset; -		first->head = start_idx; -		vif->mmap_pages[head_idx] = page; -		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx); +		skb_shinfo(skb)->frag_list = nskb;  	} -	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS); -  	return gop; -err: -	/* Unwind, freeing all pages and sending error responses. */ -	while (shinfo->nr_frags-- > start) { -		xenvif_idx_release(vif, -				frag_get_pending_idx(&frags[shinfo->nr_frags]), -				XEN_NETIF_RSP_ERROR); +} + +static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, +					   u16 pending_idx, +					   grant_handle_t handle) +{ +	if (unlikely(queue->grant_tx_handle[pending_idx] != +		     NETBACK_INVALID_HANDLE)) { +		netdev_err(queue->vif->dev, +			   "Trying to overwrite active handle! pending_idx: %x\n", +			   pending_idx); +		BUG();  	} -	/* The head too, if necessary. */ -	if (start) -		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); +	queue->grant_tx_handle[pending_idx] = handle; +} -	return NULL; +static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue, +					     u16 pending_idx) +{ +	if (unlikely(queue->grant_tx_handle[pending_idx] == +		     NETBACK_INVALID_HANDLE)) { +		netdev_err(queue->vif->dev, +			   "Trying to unmap invalid handle! 
pending_idx: %x\n", +			   pending_idx); +		BUG(); +	} +	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;  } -static int xenvif_tx_check_gop(struct xenvif *vif, +static int xenvif_tx_check_gop(struct xenvif_queue *queue,  			       struct sk_buff *skb, -			       struct gnttab_copy **gopp) +			       struct gnttab_map_grant_ref **gopp_map, +			       struct gnttab_copy **gopp_copy)  { -	struct gnttab_copy *gop = *gopp; -	u16 pending_idx = *((u16 *)skb->data); +	struct gnttab_map_grant_ref *gop_map = *gopp_map; +	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; +	/* This always points to the shinfo of the skb being checked, which +	 * could be either the first or the one on the frag_list +	 */  	struct skb_shared_info *shinfo = skb_shinfo(skb); -	struct pending_tx_info *tx_info; +	/* If this is non-NULL, we are currently checking the frag_list skb, and +	 * this points to the shinfo of the first one +	 */ +	struct skb_shared_info *first_shinfo = NULL;  	int nr_frags = shinfo->nr_frags; -	int i, err, start; -	u16 peek; /* peek into next tx request */ +	const bool sharedslot = nr_frags && +				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; +	int i, err;  	/* Check status of header. */ -	err = gop->status; -	if (unlikely(err)) -		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); - -	/* Skip first skb fragment if it is on same page as header fragment. */ -	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); +	err = (*gopp_copy)->status; +	if (unlikely(err)) { +		if (net_ratelimit()) +			netdev_dbg(queue->vif->dev, +				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", +				   (*gopp_copy)->status, +				   pending_idx, +				   (*gopp_copy)->source.u.ref); +		/* The first frag might still have this slot mapped */ +		if (!sharedslot) +			xenvif_idx_release(queue, pending_idx, +					   XEN_NETIF_RSP_ERROR); +	} +	(*gopp_copy)++; -	for (i = start; i < nr_frags; i++) { +check_frags: +	for (i = 0; i < nr_frags; i++, gop_map++) {  		int j, newerr; -		pending_ring_idx_t head;  		pending_idx = frag_get_pending_idx(&shinfo->frags[i]); -		tx_info = &vif->pending_tx_info[pending_idx]; -		head = tx_info->head;  		/* Check error status: if okay then remember grant handle. */ -		do { -			newerr = (++gop)->status; -			if (newerr) -				break; -			peek = vif->pending_ring[pending_index(++head)]; -		} while (!pending_tx_is_head(vif, peek)); +		newerr = gop_map->status;  		if (likely(!newerr)) { +			xenvif_grant_handle_set(queue, +						pending_idx, +						gop_map->handle);  			/* Had a previous error? Invalidate this fragment. */ -			if (unlikely(err)) -				xenvif_idx_release(vif, pending_idx, -						   XEN_NETIF_RSP_OKAY); +			if (unlikely(err)) { +				xenvif_idx_unmap(queue, pending_idx); +				/* If the mapping of the first frag was OK, but +				 * the header's copy failed, and they are +				 * sharing a slot, send an error +				 */ +				if (i == 0 && sharedslot) +					xenvif_idx_release(queue, pending_idx, +							   XEN_NETIF_RSP_ERROR); +				else +					xenvif_idx_release(queue, pending_idx, +							   XEN_NETIF_RSP_OKAY); +			}  			continue;  		}  		/* Error on this fragment: respond to client with an error. */ -		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); +		if (net_ratelimit()) +			netdev_dbg(queue->vif->dev, +				   "Grant map of %d. frag failed! 
status: %d pending_idx: %u ref: %u\n", +				   i, +				   gop_map->status, +				   pending_idx, +				   gop_map->ref); + +		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);  		/* Not the first error? Preceding frags already invalidated. */  		if (err)  			continue; -		/* First error: invalidate header and preceding fragments. */ -		pending_idx = *((u16 *)skb->data); -		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); -		for (j = start; j < i; j++) { +		/* First error: if the header haven't shared a slot with the +		 * first frag, release it as well. +		 */ +		if (!sharedslot) +			xenvif_idx_release(queue, +					   XENVIF_TX_CB(skb)->pending_idx, +					   XEN_NETIF_RSP_OKAY); + +		/* Invalidate preceding fragments of this skb. */ +		for (j = 0; j < i; j++) {  			pending_idx = frag_get_pending_idx(&shinfo->frags[j]); -			xenvif_idx_release(vif, pending_idx, +			xenvif_idx_unmap(queue, pending_idx); +			xenvif_idx_release(queue, pending_idx,  					   XEN_NETIF_RSP_OKAY);  		} +		/* And if we found the error while checking the frag_list, unmap +		 * the first skb's frags +		 */ +		if (first_shinfo) { +			for (j = 0; j < first_shinfo->nr_frags; j++) { +				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); +				xenvif_idx_unmap(queue, pending_idx); +				xenvif_idx_release(queue, pending_idx, +						   XEN_NETIF_RSP_OKAY); +			} +		} +  		/* Remember the error: invalidate all subsequent fragments. */  		err = newerr;  	} -	*gopp = gop + 1; +	if (skb_has_frag_list(skb) && !first_shinfo) { +		first_shinfo = skb_shinfo(skb); +		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); +		nr_frags = shinfo->nr_frags; + +		goto check_frags; +	} + +	*gopp_map = gop_map;  	return err;  } -static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) +static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)  {  	struct skb_shared_info *shinfo = skb_shinfo(skb);  	int nr_frags = shinfo->nr_frags;  	int i; +	u16 prev_pending_idx = INVALID_PENDING_IDX;  	for (i = 0; i < nr_frags; i++) {  		skb_frag_t *frag = shinfo->frags + i; @@ -1012,46 +1163,62 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)  		pending_idx = frag_get_pending_idx(frag); -		txp = &vif->pending_tx_info[pending_idx].req; -		page = virt_to_page(idx_to_kaddr(vif, pending_idx)); +		/* If this is not the first frag, chain it to the previous*/ +		if (prev_pending_idx == INVALID_PENDING_IDX) +			skb_shinfo(skb)->destructor_arg = +				&callback_param(queue, pending_idx); +		else +			callback_param(queue, prev_pending_idx).ctx = +				&callback_param(queue, pending_idx); + +		callback_param(queue, pending_idx).ctx = NULL; +		prev_pending_idx = pending_idx; + +		txp = &queue->pending_tx_info[pending_idx].req; +		page = virt_to_page(idx_to_kaddr(queue, pending_idx));  		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);  		skb->len += txp->size;  		skb->data_len += txp->size;  		skb->truesize += txp->size; -		/* Take an extra reference to offset xenvif_idx_release */ -		get_page(vif->mmap_pages[pending_idx]); -		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); +		/* Take an extra reference to offset network stack's put_page */ +		get_page(queue->mmap_pages[pending_idx]);  	} +	/* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc +	 * overlaps with "index", and "mapping" is not set. I think mapping +	 * should be set. If delivered to local stack, it would drop this +	 * skb in sk_filter unless the socket has the right to use it. 
+	 */ +	skb->pfmemalloc	= false;  } -static int xenvif_get_extras(struct xenvif *vif, +static int xenvif_get_extras(struct xenvif_queue *queue,  				struct xen_netif_extra_info *extras,  				int work_to_do)  {  	struct xen_netif_extra_info extra; -	RING_IDX cons = vif->tx.req_cons; +	RING_IDX cons = queue->tx.req_cons;  	do {  		if (unlikely(work_to_do-- <= 0)) { -			netdev_err(vif->dev, "Missing extra info\n"); -			xenvif_fatal_tx_err(vif); +			netdev_err(queue->vif->dev, "Missing extra info\n"); +			xenvif_fatal_tx_err(queue->vif);  			return -EBADR;  		} -		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), +		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),  		       sizeof(extra));  		if (unlikely(!extra.type ||  			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { -			vif->tx.req_cons = ++cons; -			netdev_err(vif->dev, +			queue->tx.req_cons = ++cons; +			netdev_err(queue->vif->dev,  				   "Invalid extra type: %d\n", extra.type); -			xenvif_fatal_tx_err(vif); +			xenvif_fatal_tx_err(queue->vif);  			return -EINVAL;  		}  		memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); -		vif->tx.req_cons = ++cons; +		queue->tx.req_cons = ++cons;  	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);  	return work_to_do; @@ -1067,113 +1234,72 @@ static int xenvif_set_skb_gso(struct xenvif *vif,  		return -EINVAL;  	} -	/* Currently only TCPv4 S.O. is supported. */ -	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { +	switch (gso->u.gso.type) { +	case XEN_NETIF_GSO_TYPE_TCPV4: +		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +		break; +	case XEN_NETIF_GSO_TYPE_TCPV6: +		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; +		break; +	default:  		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);  		xenvif_fatal_tx_err(vif);  		return -EINVAL;  	}  	skb_shinfo(skb)->gso_size = gso->u.gso.size; -	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - -	/* Header must be checked, and gso_segs computed. */ -	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; -	skb_shinfo(skb)->gso_segs = 0; +	/* gso_segs will be calculated later */  	return 0;  } -static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) +static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)  { -	struct iphdr *iph; -	int err = -EPROTO; -	int recalculate_partial_csum = 0; +	bool recalculate_partial_csum = false; -	/* -	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy +	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy  	 * peers can fail to set NETRXF_csum_blank when sending a GSO  	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and  	 * recalculate the partial checksum.  	 */  	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { -		vif->rx_gso_checksum_fixup++; +		queue->stats.rx_gso_checksum_fixup++;  		skb->ip_summed = CHECKSUM_PARTIAL; -		recalculate_partial_csum = 1; +		recalculate_partial_csum = true;  	}  	/* A non-CHECKSUM_PARTIAL SKB does not require setup. 
*/  	if (skb->ip_summed != CHECKSUM_PARTIAL)  		return 0; -	if (skb->protocol != htons(ETH_P_IP)) -		goto out; - -	iph = (void *)skb->data; -	switch (iph->protocol) { -	case IPPROTO_TCP: -		if (!skb_partial_csum_set(skb, 4 * iph->ihl, -					  offsetof(struct tcphdr, check))) -			goto out; - -		if (recalculate_partial_csum) { -			struct tcphdr *tcph = tcp_hdr(skb); -			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, -							 skb->len - iph->ihl*4, -							 IPPROTO_TCP, 0); -		} -		break; -	case IPPROTO_UDP: -		if (!skb_partial_csum_set(skb, 4 * iph->ihl, -					  offsetof(struct udphdr, check))) -			goto out; - -		if (recalculate_partial_csum) { -			struct udphdr *udph = udp_hdr(skb); -			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, -							 skb->len - iph->ihl*4, -							 IPPROTO_UDP, 0); -		} -		break; -	default: -		if (net_ratelimit()) -			netdev_err(vif->dev, -				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n", -				   iph->protocol); -		goto out; -	} - -	err = 0; - -out: -	return err; +	return skb_checksum_setup(skb, recalculate_partial_csum);  } -static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) +static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)  { -	unsigned long now = jiffies; -	unsigned long next_credit = -		vif->credit_timeout.expires + -		msecs_to_jiffies(vif->credit_usec / 1000); +	u64 now = get_jiffies_64(); +	u64 next_credit = queue->credit_window_start + +		msecs_to_jiffies(queue->credit_usec / 1000);  	/* Timer could already be pending in rare cases. */ -	if (timer_pending(&vif->credit_timeout)) +	if (timer_pending(&queue->credit_timeout))  		return true;  	/* Passed the point where we can replenish credit? */ -	if (time_after_eq(now, next_credit)) { -		vif->credit_timeout.expires = now; -		tx_add_credit(vif); +	if (time_after_eq64(now, next_credit)) { +		queue->credit_window_start = now; +		tx_add_credit(queue);  	}  	/* Still too big to send right now? Set a callback. 
*/ -	if (size > vif->remaining_credit) { -		vif->credit_timeout.data     = -			(unsigned long)vif; -		vif->credit_timeout.function = +	if (size > queue->remaining_credit) { +		queue->credit_timeout.data     = +			(unsigned long)queue; +		queue->credit_timeout.function =  			tx_credit_callback; -		mod_timer(&vif->credit_timeout, +		mod_timer(&queue->credit_timeout,  			  next_credit); +		queue->credit_window_start = next_credit;  		return true;  	} @@ -1181,17 +1307,18 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)  	return false;  } -static unsigned xenvif_tx_build_gops(struct xenvif *vif) +static void xenvif_tx_build_gops(struct xenvif_queue *queue, +				     int budget, +				     unsigned *copy_ops, +				     unsigned *map_ops)  { -	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; +	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;  	struct sk_buff *skb;  	int ret; -	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX -		< MAX_PENDING_REQS)) { +	while (skb_queue_len(&queue->tx_queue) < budget) {  		struct xen_netif_tx_request txreq;  		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; -		struct page *page;  		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];  		u16 pending_idx;  		RING_IDX idx; @@ -1199,191 +1326,254 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)  		unsigned int data_len;  		pending_ring_idx_t index; -		if (vif->tx.sring->req_prod - vif->tx.req_cons > +		if (queue->tx.sring->req_prod - queue->tx.req_cons >  		    XEN_NETIF_TX_RING_SIZE) { -			netdev_err(vif->dev, +			netdev_err(queue->vif->dev,  				   "Impossible number of requests. "  				   "req_prod %d, req_cons %d, size %ld\n", -				   vif->tx.sring->req_prod, vif->tx.req_cons, +				   queue->tx.sring->req_prod, queue->tx.req_cons,  				   XEN_NETIF_TX_RING_SIZE); -			xenvif_fatal_tx_err(vif); -			continue; +			xenvif_fatal_tx_err(queue->vif); +			break;  		} -		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); +		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);  		if (!work_to_do)  			break; -		idx = vif->tx.req_cons; +		idx = queue->tx.req_cons;  		rmb(); /* Ensure that we see the request before we copy it. */ -		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); +		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));  		/* Credit-based scheduling. */ -		if (txreq.size > vif->remaining_credit && -		    tx_credit_exceeded(vif, txreq.size)) +		if (txreq.size > queue->remaining_credit && +		    tx_credit_exceeded(queue, txreq.size))  			break; -		vif->remaining_credit -= txreq.size; +		queue->remaining_credit -= txreq.size;  		work_to_do--; -		vif->tx.req_cons = ++idx; +		queue->tx.req_cons = ++idx;  		memset(extras, 0, sizeof(extras));  		if (txreq.flags & XEN_NETTXF_extra_info) { -			work_to_do = xenvif_get_extras(vif, extras, +			work_to_do = xenvif_get_extras(queue, extras,  						       work_to_do); -			idx = vif->tx.req_cons; +			idx = queue->tx.req_cons;  			if (unlikely(work_to_do < 0))  				break;  		} -		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do); +		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);  		if (unlikely(ret < 0))  			break;  		idx += ret;  		if (unlikely(txreq.size < ETH_HLEN)) { -			netdev_dbg(vif->dev, +			netdev_dbg(queue->vif->dev,  				   "Bad packet size: %d\n", txreq.size); -			xenvif_tx_err(vif, &txreq, idx); +			xenvif_tx_err(queue, &txreq, idx);  			break;  		}  		/* No crossing a page as the payload mustn't fragment. 
*/  		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { -			netdev_err(vif->dev, +			netdev_err(queue->vif->dev,  				   "txreq.offset: %x, size: %u, end: %lu\n",  				   txreq.offset, txreq.size,  				   (txreq.offset&~PAGE_MASK) + txreq.size); -			xenvif_fatal_tx_err(vif); +			xenvif_fatal_tx_err(queue->vif);  			break;  		} -		index = pending_index(vif->pending_cons); -		pending_idx = vif->pending_ring[index]; +		index = pending_index(queue->pending_cons); +		pending_idx = queue->pending_ring[index];  		data_len = (txreq.size > PKT_PROT_LEN &&  			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?  			PKT_PROT_LEN : txreq.size; -		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, -				GFP_ATOMIC | __GFP_NOWARN); +		skb = xenvif_alloc_skb(data_len);  		if (unlikely(skb == NULL)) { -			netdev_dbg(vif->dev, +			netdev_dbg(queue->vif->dev,  				   "Can't allocate a skb in start_xmit.\n"); -			xenvif_tx_err(vif, &txreq, idx); +			xenvif_tx_err(queue, &txreq, idx);  			break;  		} -		/* Packets passed to netif_rx() must have some headroom. */ -		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); -  		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {  			struct xen_netif_extra_info *gso;  			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; -			if (xenvif_set_skb_gso(vif, skb, gso)) { +			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {  				/* Failure in xenvif_set_skb_gso is fatal. */  				kfree_skb(skb);  				break;  			}  		} -		/* XXX could copy straight to head */ -		page = xenvif_alloc_page(vif, pending_idx); -		if (!page) { -			kfree_skb(skb); -			xenvif_tx_err(vif, &txreq, idx); -			break; -		} - -		gop->source.u.ref = txreq.gref; -		gop->source.domid = vif->domid; -		gop->source.offset = txreq.offset; - -		gop->dest.u.gmfn = virt_to_mfn(page_address(page)); -		gop->dest.domid = DOMID_SELF; -		gop->dest.offset = txreq.offset; +		XENVIF_TX_CB(skb)->pending_idx = pending_idx; -		gop->len = txreq.size; -		gop->flags = GNTCOPY_source_gref; +		__skb_put(skb, data_len); +		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; +		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; +		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; -		gop++; +		queue->tx_copy_ops[*copy_ops].dest.u.gmfn = +			virt_to_mfn(skb->data); +		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; +		queue->tx_copy_ops[*copy_ops].dest.offset = +			offset_in_page(skb->data); -		memcpy(&vif->pending_tx_info[pending_idx].req, -		       &txreq, sizeof(txreq)); -		vif->pending_tx_info[pending_idx].head = index; -		*((u16 *)skb->data) = pending_idx; +		queue->tx_copy_ops[*copy_ops].len = data_len; +		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; -		__skb_put(skb, data_len); +		(*copy_ops)++;  		skb_shinfo(skb)->nr_frags = ret;  		if (data_len < txreq.size) {  			skb_shinfo(skb)->nr_frags++;  			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],  					     pending_idx); +			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); +			gop++;  		} else {  			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],  					     INVALID_PENDING_IDX); +			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq, +			       sizeof(txreq));  		} -		vif->pending_cons++; +		queue->pending_cons++; -		request_gop = xenvif_get_requests(vif, skb, txfrags, gop); +		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);  		if (request_gop == NULL) {  			kfree_skb(skb); -			xenvif_tx_err(vif, &txreq, idx); +			xenvif_tx_err(queue, &txreq, idx);  			break;  		}  		gop = request_gop; -		__skb_queue_tail(&vif->tx_queue, 
skb); +		__skb_queue_tail(&queue->tx_queue, skb); -		vif->tx.req_cons = idx; +		queue->tx.req_cons = idx; -		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops)) +		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || +		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))  			break;  	} -	return gop - vif->tx_copy_ops; +	(*map_ops) = gop - queue->tx_map_ops; +	return;  } +/* Consolidate skb with a frag_list into a brand new one with local pages on + * frags. Returns 0 or -ENOMEM if can't allocate new pages. + */ +static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb) +{ +	unsigned int offset = skb_headlen(skb); +	skb_frag_t frags[MAX_SKB_FRAGS]; +	int i; +	struct ubuf_info *uarg; +	struct sk_buff *nskb = skb_shinfo(skb)->frag_list; + +	queue->stats.tx_zerocopy_sent += 2; +	queue->stats.tx_frag_overflow++; -static int xenvif_tx_submit(struct xenvif *vif, int budget) +	xenvif_fill_frags(queue, nskb); +	/* Subtract frags size, we will correct it later */ +	skb->truesize -= skb->data_len; +	skb->len += nskb->len; +	skb->data_len += nskb->len; + +	/* create a brand new frags array and coalesce there */ +	for (i = 0; offset < skb->len; i++) { +		struct page *page; +		unsigned int len; + +		BUG_ON(i >= MAX_SKB_FRAGS); +		page = alloc_page(GFP_ATOMIC|__GFP_COLD); +		if (!page) { +			int j; +			skb->truesize += skb->data_len; +			for (j = 0; j < i; j++) +				put_page(frags[j].page.p); +			return -ENOMEM; +		} + +		if (offset + PAGE_SIZE < skb->len) +			len = PAGE_SIZE; +		else +			len = skb->len - offset; +		if (skb_copy_bits(skb, offset, page_address(page), len)) +			BUG(); + +		offset += len; +		frags[i].page.p = page; +		frags[i].page_offset = 0; +		skb_frag_size_set(&frags[i], len); +	} +	/* swap out with old one */ +	memcpy(skb_shinfo(skb)->frags, +	       frags, +	       i * sizeof(skb_frag_t)); +	skb_shinfo(skb)->nr_frags = i; +	skb->truesize += i * PAGE_SIZE; + +	/* remove traces of mapped pages and frag_list */ +	skb_frag_list_init(skb); +	uarg = skb_shinfo(skb)->destructor_arg; +	uarg->callback(uarg, true); +	skb_shinfo(skb)->destructor_arg = NULL; + +	skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY; +	kfree_skb(nskb); + +	return 0; +} + +static int xenvif_tx_submit(struct xenvif_queue *queue)  { -	struct gnttab_copy *gop = vif->tx_copy_ops; +	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; +	struct gnttab_copy *gop_copy = queue->tx_copy_ops;  	struct sk_buff *skb;  	int work_done = 0; -	while (work_done < budget && -	       (skb = __skb_dequeue(&vif->tx_queue)) != NULL) { +	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {  		struct xen_netif_tx_request *txp;  		u16 pending_idx;  		unsigned data_len; -		pending_idx = *((u16 *)skb->data); -		txp = &vif->pending_tx_info[pending_idx].req; +		pending_idx = XENVIF_TX_CB(skb)->pending_idx; +		txp = &queue->pending_tx_info[pending_idx].req;  		/* Check the remap error code. 
*/ -		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) { -			netdev_dbg(vif->dev, "netback grant failed.\n"); +		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { +			/* If there was an error, xenvif_tx_check_gop is +			 * expected to release all the frags which were mapped, +			 * so kfree_skb shouldn't do it again +			 */  			skb_shinfo(skb)->nr_frags = 0; +			if (skb_has_frag_list(skb)) { +				struct sk_buff *nskb = +						skb_shinfo(skb)->frag_list; +				skb_shinfo(nskb)->nr_frags = 0; +			}  			kfree_skb(skb);  			continue;  		}  		data_len = skb->len; -		memcpy(skb->data, -		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset), -		       data_len); +		callback_param(queue, pending_idx).ctx = NULL;  		if (data_len < txp->size) {  			/* Append the packet payload as a fragment. */  			txp->offset += data_len;  			txp->size -= data_len;  		} else {  			/* Schedule a response immediately. */ -			xenvif_idx_release(vif, pending_idx, +			xenvif_idx_release(queue, pending_idx,  					   XEN_NETIF_RSP_OKAY);  		} @@ -1392,141 +1582,253 @@ static int xenvif_tx_submit(struct xenvif *vif, int budget)  		else if (txp->flags & XEN_NETTXF_data_validated)  			skb->ip_summed = CHECKSUM_UNNECESSARY; -		xenvif_fill_frags(vif, skb); +		xenvif_fill_frags(queue, skb); -		/* -		 * If the initial fragment was < PKT_PROT_LEN then -		 * pull through some bytes from the other fragments to -		 * increase the linear region to PKT_PROT_LEN bytes. -		 */ -		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) { +		if (unlikely(skb_has_frag_list(skb))) { +			if (xenvif_handle_frag_list(queue, skb)) { +				if (net_ratelimit()) +					netdev_err(queue->vif->dev, +						   "Not enough memory to consolidate frag_list!\n"); +				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; +				kfree_skb(skb); +				continue; +			} +		} + +		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {  			int target = min_t(int, skb->len, PKT_PROT_LEN);  			__pskb_pull_tail(skb, target - skb_headlen(skb));  		} -		skb->dev      = vif->dev; +		skb->dev      = queue->vif->dev;  		skb->protocol = eth_type_trans(skb, skb->dev);  		skb_reset_network_header(skb); -		if (checksum_setup(vif, skb)) { -			netdev_dbg(vif->dev, +		if (checksum_setup(queue, skb)) { +			netdev_dbg(queue->vif->dev,  				   "Can't setup checksum in net_tx_action\n"); +			/* We have to set this flag to trigger the callback */ +			if (skb_shinfo(skb)->destructor_arg) +				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;  			kfree_skb(skb);  			continue;  		}  		skb_probe_transport_header(skb, 0); -		vif->dev->stats.rx_bytes += skb->len; -		vif->dev->stats.rx_packets++; +		/* If the packet is GSO then we will have just set up the +		 * transport header offset in checksum_setup so it's now +		 * straightforward to calculate gso_segs. +		 */ +		if (skb_is_gso(skb)) { +			int mss = skb_shinfo(skb)->gso_size; +			int hdrlen = skb_transport_header(skb) - +				skb_mac_header(skb) + +				tcp_hdrlen(skb); + +			skb_shinfo(skb)->gso_segs = +				DIV_ROUND_UP(skb->len - hdrlen, mss); +		} + +		queue->stats.rx_bytes += skb->len; +		queue->stats.rx_packets++;  		work_done++; +		/* Set this flag right before netif_receive_skb, otherwise +		 * someone might think this packet already left netback, and +		 * do a skb_copy_ubufs while we are still in control of the +		 * skb. E.g. the __pskb_pull_tail earlier can do such thing. 
+		 */ +		if (skb_shinfo(skb)->destructor_arg) { +			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; +			queue->stats.tx_zerocopy_sent++; +		} +  		netif_receive_skb(skb);  	}  	return work_done;  } -/* Called after netfront has transmitted */ -int xenvif_tx_action(struct xenvif *vif, int budget) +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)  { -	unsigned nr_gops; -	int work_done; - -	if (unlikely(!tx_work_todo(vif))) -		return 0; - -	nr_gops = xenvif_tx_build_gops(vif); - -	if (nr_gops == 0) -		return 0; - -	gnttab_batch_copy(vif->tx_copy_ops, nr_gops); - -	work_done = xenvif_tx_submit(vif, nr_gops); +	unsigned long flags; +	pending_ring_idx_t index; +	struct xenvif_queue *queue = ubuf_to_queue(ubuf); -	return work_done; +	/* This is the only place where we grab this lock, to protect callbacks +	 * from each other. +	 */ +	spin_lock_irqsave(&queue->callback_lock, flags); +	do { +		u16 pending_idx = ubuf->desc; +		ubuf = (struct ubuf_info *) ubuf->ctx; +		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= +			MAX_PENDING_REQS); +		index = pending_index(queue->dealloc_prod); +		queue->dealloc_ring[index] = pending_idx; +		/* Sync with xenvif_tx_dealloc_action: +		 * insert idx then incr producer. +		 */ +		smp_wmb(); +		queue->dealloc_prod++; +	} while (ubuf); +	wake_up(&queue->dealloc_wq); +	spin_unlock_irqrestore(&queue->callback_lock, flags); + +	if (likely(zerocopy_success)) +		queue->stats.tx_zerocopy_success++; +	else +		queue->stats.tx_zerocopy_fail++;  } -static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, -			       u8 status) +static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)  { -	struct pending_tx_info *pending_tx_info; -	pending_ring_idx_t head; -	u16 peek; /* peek into next tx request */ +	struct gnttab_unmap_grant_ref *gop; +	pending_ring_idx_t dc, dp; +	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS]; +	unsigned int i = 0; -	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL)); +	dc = queue->dealloc_cons; +	gop = queue->tx_unmap_ops; -	/* Already complete? */ -	if (vif->mmap_pages[pending_idx] == NULL) -		return; +	/* Free up any grants we have finished using */ +	do { +		dp = queue->dealloc_prod; + +		/* Ensure we see all indices enqueued by all +		 * xenvif_zerocopy_callback(). 
+		 */ +		smp_rmb(); + +		while (dc != dp) { +			BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); +			pending_idx = +				queue->dealloc_ring[pending_index(dc++)]; + +			pending_idx_release[gop-queue->tx_unmap_ops] = +				pending_idx; +			queue->pages_to_unmap[gop-queue->tx_unmap_ops] = +				queue->mmap_pages[pending_idx]; +			gnttab_set_unmap_op(gop, +					    idx_to_kaddr(queue, pending_idx), +					    GNTMAP_host_map, +					    queue->grant_tx_handle[pending_idx]); +			xenvif_grant_handle_reset(queue, pending_idx); +			++gop; +		} -	pending_tx_info = &vif->pending_tx_info[pending_idx]; +	} while (dp != queue->dealloc_prod); + +	queue->dealloc_cons = dc; + +	if (gop - queue->tx_unmap_ops > 0) { +		int ret; +		ret = gnttab_unmap_refs(queue->tx_unmap_ops, +					NULL, +					queue->pages_to_unmap, +					gop - queue->tx_unmap_ops); +		if (ret) { +			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n", +				   gop - queue->tx_unmap_ops, ret); +			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { +				if (gop[i].status != GNTST_okay) +					netdev_err(queue->vif->dev, +						   " host_addr: %llx handle: %x status: %d\n", +						   gop[i].host_addr, +						   gop[i].handle, +						   gop[i].status); +			} +			BUG(); +		} +	} -	head = pending_tx_info->head; +	for (i = 0; i < gop - queue->tx_unmap_ops; ++i) +		xenvif_idx_release(queue, pending_idx_release[i], +				   XEN_NETIF_RSP_OKAY); +} -	BUG_ON(!pending_tx_is_head(vif, head)); -	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx); -	do { -		pending_ring_idx_t index; -		pending_ring_idx_t idx = pending_index(head); -		u16 info_idx = vif->pending_ring[idx]; +/* Called after netfront has transmitted */ +int xenvif_tx_action(struct xenvif_queue *queue, int budget) +{ +	unsigned nr_mops, nr_cops = 0; +	int work_done, ret; -		pending_tx_info = &vif->pending_tx_info[info_idx]; -		make_tx_response(vif, &pending_tx_info->req, status); +	if (unlikely(!tx_work_todo(queue))) +		return 0; -		/* Setting any number other than -		 * INVALID_PENDING_RING_IDX indicates this slot is -		 * starting a new packet / ending a previous packet. 
-		 */ -		pending_tx_info->head = 0; +	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops); -		index = pending_index(vif->pending_prod++); -		vif->pending_ring[index] = vif->pending_ring[info_idx]; +	if (nr_cops == 0) +		return 0; -		peek = vif->pending_ring[pending_index(++head)]; +	gnttab_batch_copy(queue->tx_copy_ops, nr_cops); +	if (nr_mops != 0) { +		ret = gnttab_map_refs(queue->tx_map_ops, +				      NULL, +				      queue->pages_to_map, +				      nr_mops); +		BUG_ON(ret); +	} -	} while (!pending_tx_is_head(vif, peek)); +	work_done = xenvif_tx_submit(queue); -	put_page(vif->mmap_pages[pending_idx]); -	vif->mmap_pages[pending_idx] = NULL; +	return work_done; +} + +static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, +			       u8 status) +{ +	struct pending_tx_info *pending_tx_info; +	pending_ring_idx_t index; +	unsigned long flags; + +	pending_tx_info = &queue->pending_tx_info[pending_idx]; +	spin_lock_irqsave(&queue->response_lock, flags); +	make_tx_response(queue, &pending_tx_info->req, status); +	index = pending_index(queue->pending_prod); +	queue->pending_ring[index] = pending_idx; +	/* TX shouldn't use the index before we give it back here */ +	mb(); +	queue->pending_prod++; +	spin_unlock_irqrestore(&queue->response_lock, flags);  } -static void make_tx_response(struct xenvif *vif, +static void make_tx_response(struct xenvif_queue *queue,  			     struct xen_netif_tx_request *txp,  			     s8       st)  { -	RING_IDX i = vif->tx.rsp_prod_pvt; +	RING_IDX i = queue->tx.rsp_prod_pvt;  	struct xen_netif_tx_response *resp;  	int notify; -	resp = RING_GET_RESPONSE(&vif->tx, i); +	resp = RING_GET_RESPONSE(&queue->tx, i);  	resp->id     = txp->id;  	resp->status = st;  	if (txp->flags & XEN_NETTXF_extra_info) -		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; +		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; -	vif->tx.rsp_prod_pvt = ++i; -	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); +	queue->tx.rsp_prod_pvt = ++i; +	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);  	if (notify) -		notify_remote_via_irq(vif->tx_irq); +		notify_remote_via_irq(queue->tx_irq);  } -static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, +static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,  					     u16      id,  					     s8       st,  					     u16      offset,  					     u16      size,  					     u16      flags)  { -	RING_IDX i = vif->rx.rsp_prod_pvt; +	RING_IDX i = queue->rx.rsp_prod_pvt;  	struct xen_netif_rx_response *resp; -	resp = RING_GET_RESPONSE(&vif->rx, i); +	resp = RING_GET_RESPONSE(&queue->rx, i);  	resp->offset     = offset;  	resp->flags      = flags;  	resp->id         = id; @@ -1534,38 +1836,67 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,  	if (st < 0)  		resp->status = (s16)st; -	vif->rx.rsp_prod_pvt = ++i; +	queue->rx.rsp_prod_pvt = ++i;  	return resp;  } -static inline int rx_work_todo(struct xenvif *vif) +void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)  { -	return !skb_queue_empty(&vif->rx_queue); +	int ret; +	struct gnttab_unmap_grant_ref tx_unmap_op; + +	gnttab_set_unmap_op(&tx_unmap_op, +			    idx_to_kaddr(queue, pending_idx), +			    GNTMAP_host_map, +			    queue->grant_tx_handle[pending_idx]); +	xenvif_grant_handle_reset(queue, pending_idx); + +	ret = gnttab_unmap_refs(&tx_unmap_op, NULL, +				&queue->mmap_pages[pending_idx], 1); +	if (ret) { +		netdev_err(queue->vif->dev, +			   "Unmap fail: ret: %d 
pending_idx: %d host_addr: %llx handle: %x status: %d\n", +			   ret, +			   pending_idx, +			   tx_unmap_op.host_addr, +			   tx_unmap_op.handle, +			   tx_unmap_op.status); +		BUG(); +	}  } -static inline int tx_work_todo(struct xenvif *vif) +static inline int rx_work_todo(struct xenvif_queue *queue)  { +	return (!skb_queue_empty(&queue->rx_queue) && +	       xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) || +	       queue->rx_queue_purge; +} -	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) && -	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX -	     < MAX_PENDING_REQS)) +static inline int tx_work_todo(struct xenvif_queue *queue) +{ +	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))  		return 1;  	return 0;  } -void xenvif_unmap_frontend_rings(struct xenvif *vif) +static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue) +{ +	return queue->dealloc_cons != queue->dealloc_prod; +} + +void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)  { -	if (vif->tx.sring) -		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), -					vif->tx.sring); -	if (vif->rx.sring) -		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), -					vif->rx.sring); +	if (queue->tx.sring) +		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), +					queue->tx.sring); +	if (queue->rx.sring) +		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), +					queue->rx.sring);  } -int xenvif_map_frontend_rings(struct xenvif *vif, +int xenvif_map_frontend_rings(struct xenvif_queue *queue,  			      grant_ref_t tx_ring_ref,  			      grant_ref_t rx_ring_ref)  { @@ -1575,48 +1906,102 @@ int xenvif_map_frontend_rings(struct xenvif *vif,  	int err = -ENOMEM; -	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), +	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),  				     tx_ring_ref, &addr);  	if (err)  		goto err;  	txs = (struct xen_netif_tx_sring *)addr; -	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); +	BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE); -	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), +	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),  				     rx_ring_ref, &addr);  	if (err)  		goto err;  	rxs = (struct xen_netif_rx_sring *)addr; -	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); - -	vif->rx_req_cons_peek = 0; +	BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);  	return 0;  err: -	xenvif_unmap_frontend_rings(vif); +	xenvif_unmap_frontend_rings(queue);  	return err;  } -int xenvif_kthread(void *data) +static void xenvif_start_queue(struct xenvif_queue *queue)  { -	struct xenvif *vif = data; +	if (xenvif_schedulable(queue->vif)) +		xenvif_wake_queue(queue); +} + +int xenvif_kthread_guest_rx(void *data) +{ +	struct xenvif_queue *queue = data; +	struct sk_buff *skb;  	while (!kthread_should_stop()) { -		wait_event_interruptible(vif->wq, -					 rx_work_todo(vif) || +		wait_event_interruptible(queue->wq, +					 rx_work_todo(queue) || +					 queue->vif->disabled ||  					 kthread_should_stop()); + +		/* This frontend is found to be rogue, disable it in +		 * kthread context. Currently this is only set when +		 * netback finds out frontend sends malformed packet, +		 * but we cannot disable the interface in softirq +		 * context so we defer it here, if this thread is +		 * associated with queue 0. 
+		 */ +		if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0)) +			xenvif_carrier_off(queue->vif); +  		if (kthread_should_stop())  			break; -		if (rx_work_todo(vif)) -			xenvif_rx_action(vif); +		if (queue->rx_queue_purge) { +			skb_queue_purge(&queue->rx_queue); +			queue->rx_queue_purge = false; +		} + +		if (!skb_queue_empty(&queue->rx_queue)) +			xenvif_rx_action(queue); + +		if (skb_queue_empty(&queue->rx_queue) && +		    xenvif_queue_stopped(queue)) { +			del_timer_sync(&queue->wake_queue); +			xenvif_start_queue(queue); +		}  		cond_resched();  	} +	/* Bin any remaining skbs */ +	while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) +		dev_kfree_skb(skb); + +	return 0; +} + +int xenvif_dealloc_kthread(void *data) +{ +	struct xenvif_queue *queue = data; + +	while (!kthread_should_stop()) { +		wait_event_interruptible(queue->dealloc_wq, +					 tx_dealloc_work_todo(queue) || +					 kthread_should_stop()); +		if (kthread_should_stop()) +			break; + +		xenvif_tx_dealloc_action(queue); +		cond_resched(); +	} + +	/* Unmap anything remaining*/ +	if (tx_dealloc_work_todo(queue)) +		xenvif_tx_dealloc_action(queue); +  	return 0;  } @@ -1627,6 +2012,9 @@ static int __init netback_init(void)  	if (!xen_domain())  		return -ENODEV; +	/* Allow as many queues as there are CPUs, by default */ +	xenvif_max_queues = num_online_cpus(); +  	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {  		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",  			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX); @@ -1637,6 +2025,8 @@ static int __init netback_init(void)  	if (rc)  		goto failed_init; +	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs); +  	return 0;  failed_init:  | 
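Note on the dealloc ring introduced in this patch: xenvif_zerocopy_callback() publishes finished pending indices (write the ring slot, smp_wmb(), then advance dealloc_prod, with callback_lock serialising concurrent callbacks), and xenvif_tx_dealloc_action() consumes them (read dealloc_prod, smp_rmb(), then drain up to it). Below is a minimal user-space sketch of that producer/consumer hand-off, using C11 release/acquire atomics as stand-ins for the kernel barriers; the names and sizes are illustrative only and are not part of the driver.

/* Single-producer/single-consumer ring analogue of the dealloc hand-off.
 * In the driver the producer side is serialised by callback_lock; here a
 * single producer is simply assumed.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256				/* power of two, like MAX_PENDING_REQS */
#define ring_index(i) ((i) & (RING_SIZE - 1))

static uint16_t dealloc_ring[RING_SIZE];
static atomic_uint dealloc_prod;		/* advanced by the "callback" */
static atomic_uint dealloc_cons;		/* advanced by the "dealloc thread" */

/* Producer: queue one pending index for later unmapping. */
static void dealloc_push(uint16_t pending_idx)
{
	unsigned int prod = atomic_load_explicit(&dealloc_prod, memory_order_relaxed);

	dealloc_ring[ring_index(prod)] = pending_idx;
	/* Make the slot visible before the new producer index (smp_wmb analogue). */
	atomic_store_explicit(&dealloc_prod, prod + 1, memory_order_release);
}

/* Consumer: drain everything published so far. */
static void dealloc_drain(void)
{
	unsigned int cons = atomic_load_explicit(&dealloc_cons, memory_order_relaxed);
	/* Acquire pairs with the release above (smp_rmb analogue). */
	unsigned int prod = atomic_load_explicit(&dealloc_prod, memory_order_acquire);

	while (cons != prod) {
		uint16_t pending_idx = dealloc_ring[ring_index(cons++)];

		printf("would unmap pending_idx %u\n", (unsigned int)pending_idx);
	}
	atomic_store_explicit(&dealloc_cons, cons, memory_order_relaxed);
}

int main(void)
{
	dealloc_push(3);
	dealloc_push(7);
	dealloc_drain();
	return 0;
}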

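Note on the gso_segs computation added to xenvif_tx_submit(): once checksum_setup() has set up the transport header offset, the segment count is simply the payload (total length minus MAC, IP and TCP headers) divided by gso_size, rounded up. A standalone sketch of that arithmetic follows; the example values are made up for illustration and do not come from the patch.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* segments = ceil((total length - header length) / MSS) */
static unsigned int gso_segs(unsigned int pkt_len, unsigned int hdrlen,
			     unsigned int mss)
{
	return DIV_ROUND_UP(pkt_len - hdrlen, mss);
}

int main(void)
{
	/* e.g. a 65226-byte TSO packet with 66 bytes of headers and MSS 1448 */
	printf("%u segments\n", gso_segs(65226, 66, 1448));
	return 0;
}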
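Note on xenvif_handle_frag_list(): the consolidation loop carves the combined payload into freshly allocated local pages, at most PAGE_SIZE bytes per new frag, with the last frag possibly short. The sketch below shows just that chunking arithmetic with plain integers standing in for skb fields and struct page; the lengths are hypothetical.

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int len_total = 10000;	/* stands in for skb->len */
	unsigned int offset = 1500;	/* stands in for skb_headlen(skb) */
	unsigned int i = 0;

	while (offset < len_total) {
		unsigned int len;

		/* Same branch shape as the driver's loop body. */
		if (offset + PAGE_SIZE < len_total)
			len = PAGE_SIZE;
		else
			len = len_total - offset;

		printf("frag %u: offset %u, len %u\n", i++, offset, len);
		offset += len;
	}
	return 0;
}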