Diffstat (limited to 'drivers/net/xen-netback')
 -rw-r--r--  drivers/net/xen-netback/common.h    |  47
 -rw-r--r--  drivers/net/xen-netback/interface.c |  58
 -rw-r--r--  drivers/net/xen-netback/netback.c   | 500
 -rw-r--r--  drivers/net/xen-netback/xenbus.c    |   3
4 files changed, 158 insertions, 450 deletions
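
The heart of the change is visible in the netback.c hunks below: the exact per-skb slot accounting (xenvif_count_skb_slots() and rx_req_cons_peek) is removed in favour of a cheap worst-case estimate that is checked against the ring just before an skb is put on it. The following is a minimal userspace sketch of that estimate, not kernel code; struct skb_model and the example values are invented stand-ins for struct sk_buff.

/* slot_estimate.c: userspace model of the worst-case RX slot estimate
 * introduced by this patch. struct skb_model is a stand-in for
 * struct sk_buff; field names and example values are made up.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX_FRAGS 17 /* stands in for MAX_SKB_FRAGS */

struct skb_model {
	unsigned long data_offset; /* offset of the linear data in its page */
	unsigned long headlen;     /* bytes in the linear (head) area */
	int nr_frags;
	unsigned long frag_size[MAX_FRAGS];
	int gso;                   /* TCPv4/TCPv6 GSO needs one extra slot */
};

/* Mirrors the max_slots_needed calculation in xenvif_rx_action(): one slot
 * per page touched by the head, one per page of every frag, plus one for
 * the GSO metadata. */
static int max_slots_needed(const struct skb_model *skb)
{
	int i;
	int slots = DIV_ROUND_UP(skb->data_offset + skb->headlen, PAGE_SIZE);

	for (i = 0; i < skb->nr_frags; i++)
		slots += DIV_ROUND_UP(skb->frag_size[i], PAGE_SIZE);
	if (skb->gso)
		slots++;
	return slots;
}

int main(void)
{
	struct skb_model skb = {
		.data_offset = 64,
		.headlen = 1400,
		.nr_frags = 2,
		.frag_size = { 4096, 2000 },
		.gso = 1,
	};

	printf("worst-case rx slots: %d\n", max_slots_needed(&skb));
	return 0;
}

xenvif_start_xmit() uses the cheaper lower bound of one slot for the header plus one per frag (plus one for GSO metadata) when deciding whether to stop the queue, and xenvif_rx_action() BUG()s if the copy loop ever consumes more meta slots than the estimate above.
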
| diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 08ae01b41c8..4c76bcb9a87 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -101,6 +101,13 @@ struct xenvif_rx_meta {  #define MAX_PENDING_REQS 256 +/* It's possible for an skb to have a maximal number of frags + * but still be less than MAX_BUFFER_OFFSET in size. Thus the + * worst-case number of copy operations is MAX_SKB_FRAGS per + * ring slot. + */ +#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) +  struct xenvif {  	/* Unique identifier for this interface. */  	domid_t          domid; @@ -136,20 +143,19 @@ struct xenvif {  	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */  	struct xen_netif_rx_back_ring rx;  	struct sk_buff_head rx_queue; - -	/* Allow xenvif_start_xmit() to peek ahead in the rx request -	 * ring.  This is a prediction of what rx_req_cons will be -	 * once all queued skbs are put on the ring. +	bool rx_queue_stopped; +	/* Set when the RX interrupt is triggered by the frontend. +	 * The worker thread may need to wake the queue.  	 */ -	RING_IDX rx_req_cons_peek; +	bool rx_event; -	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each -	 * head/fragment page uses 2 copy operations because it -	 * straddles two buffers in the frontend. -	 */ -	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; -	struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; +	/* This array is allocated seperately as it is large */ +	struct gnttab_copy *grant_copy_op; +	/* We create one meta structure per ring request we consume, so +	 * the maximum number is the same as the ring size. +	 */ +	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];  	u8               fe_dev_addr[6]; @@ -198,8 +204,6 @@ void xenvif_xenbus_fini(void);  int xenvif_schedulable(struct xenvif *vif); -int xenvif_rx_ring_full(struct xenvif *vif); -  int xenvif_must_stop_queue(struct xenvif *vif);  /* (Un)Map communication rings. */ @@ -211,21 +215,20 @@ int xenvif_map_frontend_rings(struct xenvif *vif,  /* Check for SKBs from frontend and schedule backend processing */  void xenvif_check_rx_xenvif(struct xenvif *vif); -/* Queue an SKB for transmission to the frontend */ -void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); -/* Notify xenvif that ring now has space to send an skb to the frontend */ -void xenvif_notify_tx_completion(struct xenvif *vif); -  /* Prevent the device from generating any further traffic. */  void xenvif_carrier_off(struct xenvif *vif); -/* Returns number of ring slots required to send an skb to the frontend */ -unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); -  int xenvif_tx_action(struct xenvif *vif, int budget); -void xenvif_rx_action(struct xenvif *vif);  int xenvif_kthread(void *data); +void xenvif_kick_thread(struct xenvif *vif); + +/* Determine whether the needed number of slots (req) are available, + * and set req_event if not. 
+ */ +bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed); + +void xenvif_stop_queue(struct xenvif *vif);  extern bool separate_tx_rx_irq; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 870f1fa5837..b9de31ea7fc 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -34,6 +34,7 @@  #include <linux/ethtool.h>  #include <linux/rtnetlink.h>  #include <linux/if_vlan.h> +#include <linux/vmalloc.h>  #include <xen/events.h>  #include <asm/xen/hypercall.h> @@ -46,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)  	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);  } -static int xenvif_rx_schedulable(struct xenvif *vif) -{ -	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif); -} -  static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)  {  	struct xenvif *vif = dev_id; @@ -104,8 +100,8 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)  {  	struct xenvif *vif = dev_id; -	if (xenvif_rx_schedulable(vif)) -		netif_wake_queue(vif->dev); +	vif->rx_event = true; +	xenvif_kick_thread(vif);  	return IRQ_HANDLED;  } @@ -121,24 +117,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)  static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)  {  	struct xenvif *vif = netdev_priv(dev); +	int min_slots_needed;  	BUG_ON(skb->dev != dev);  	/* Drop the packet if vif is not ready */ -	if (vif->task == NULL) +	if (vif->task == NULL || !xenvif_schedulable(vif))  		goto drop; -	/* Drop the packet if the target domain has no receive buffers. */ -	if (!xenvif_rx_schedulable(vif)) -		goto drop; +	/* At best we'll need one slot for the header and one for each +	 * frag. +	 */ +	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags; -	/* Reserve ring slots for the worst-case number of fragments. */ -	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb); +	/* If the skb is GSO then we'll also need an extra slot for the +	 * metadata. +	 */ +	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || +	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) +		min_slots_needed++; -	if (vif->can_queue && xenvif_must_stop_queue(vif)) -		netif_stop_queue(dev); +	/* If the skb can't possibly fit in the remaining slots +	 * then turn off the queue to give the ring a chance to +	 * drain. 
+	 */ +	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) +		xenvif_stop_queue(vif); -	xenvif_queue_tx_skb(vif, skb); +	skb_queue_tail(&vif->rx_queue, skb); +	xenvif_kick_thread(vif);  	return NETDEV_TX_OK; @@ -148,12 +155,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)  	return NETDEV_TX_OK;  } -void xenvif_notify_tx_completion(struct xenvif *vif) -{ -	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif)) -		netif_wake_queue(vif->dev); -} -  static struct net_device_stats *xenvif_get_stats(struct net_device *dev)  {  	struct xenvif *vif = netdev_priv(dev); @@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,  	SET_NETDEV_DEV(dev, parent);  	vif = netdev_priv(dev); + +	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) * +				     MAX_GRANT_COPY_OPS); +	if (vif->grant_copy_op == NULL) { +		pr_warn("Could not allocate grant copy space for %s\n", name); +		free_netdev(dev); +		return ERR_PTR(-ENOMEM); +	} +  	vif->domid  = domid;  	vif->handle = handle;  	vif->can_sg = 1; @@ -378,6 +388,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,  	if (err < 0)  		goto err; +	init_waitqueue_head(&vif->wq); +  	if (tx_evtchn == rx_evtchn) {  		/* feature-split-event-channels == 0 */  		err = bind_interdomain_evtchn_to_irqhandler( @@ -410,7 +422,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,  		disable_irq(vif->rx_irq);  	} -	init_waitqueue_head(&vif->wq);  	task = kthread_create(xenvif_kthread,  			      (void *)vif, "%s", vif->dev->name);  	if (IS_ERR(task)) { @@ -487,6 +498,7 @@ void xenvif_free(struct xenvif *vif)  	unregister_netdev(vif->dev); +	vfree(vif->grant_copy_op);  	free_netdev(vif->dev);  	module_put(THIS_MODULE); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 27bbe58dcbe..6b62c3eb8e1 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -39,7 +39,6 @@  #include <linux/udp.h>  #include <net/tcp.h> -#include <net/ip6_checksum.h>  #include <xen/xen.h>  #include <xen/events.h> @@ -138,36 +137,26 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)  		vif->pending_prod + vif->pending_cons;  } -static int max_required_rx_slots(struct xenvif *vif) +bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)  { -	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); +	RING_IDX prod, cons; -	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ -	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask) -		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ +	do { +		prod = vif->rx.sring->req_prod; +		cons = vif->rx.req_cons; -	return max; -} +		if (prod - cons >= needed) +			return true; -int xenvif_rx_ring_full(struct xenvif *vif) -{ -	RING_IDX peek   = vif->rx_req_cons_peek; -	RING_IDX needed = max_required_rx_slots(vif); - -	return ((vif->rx.sring->req_prod - peek) < needed) || -	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed); -} +		vif->rx.sring->req_event = prod + 1; -int xenvif_must_stop_queue(struct xenvif *vif) -{ -	if (!xenvif_rx_ring_full(vif)) -		return 0; - -	vif->rx.sring->req_event = vif->rx_req_cons_peek + -		max_required_rx_slots(vif); -	mb(); /* request notification /then/ check the queue */ +		/* Make sure event is visible before we check prod +		 * again. 
+		 */ +		mb(); +	} while (vif->rx.sring->req_prod != prod); -	return xenvif_rx_ring_full(vif); +	return false;  }  /* @@ -210,93 +199,6 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)  	return false;  } -struct xenvif_count_slot_state { -	unsigned long copy_off; -	bool head; -}; - -unsigned int xenvif_count_frag_slots(struct xenvif *vif, -				     unsigned long offset, unsigned long size, -				     struct xenvif_count_slot_state *state) -{ -	unsigned count = 0; - -	offset &= ~PAGE_MASK; - -	while (size > 0) { -		unsigned long bytes; - -		bytes = PAGE_SIZE - offset; - -		if (bytes > size) -			bytes = size; - -		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) { -			count++; -			state->copy_off = 0; -		} - -		if (state->copy_off + bytes > MAX_BUFFER_OFFSET) -			bytes = MAX_BUFFER_OFFSET - state->copy_off; - -		state->copy_off += bytes; - -		offset += bytes; -		size -= bytes; - -		if (offset == PAGE_SIZE) -			offset = 0; - -		state->head = false; -	} - -	return count; -} - -/* - * Figure out how many ring slots we're going to need to send @skb to - * the guest. This function is essentially a dry run of - * xenvif_gop_frag_copy. - */ -unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) -{ -	struct xenvif_count_slot_state state; -	unsigned int count; -	unsigned char *data; -	unsigned i; - -	state.head = true; -	state.copy_off = 0; - -	/* Slot for the first (partial) page of data. */ -	count = 1; - -	/* Need a slot for the GSO prefix for GSO extra data? */ -	if (skb_shinfo(skb)->gso_size) -		count++; - -	data = skb->data; -	while (data < skb_tail_pointer(skb)) { -		unsigned long offset = offset_in_page(data); -		unsigned long size = PAGE_SIZE - offset; - -		if (data + size > skb_tail_pointer(skb)) -			size = skb_tail_pointer(skb) - data; - -		count += xenvif_count_frag_slots(vif, offset, size, &state); - -		data += size; -	} - -	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { -		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); -		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; - -		count += xenvif_count_frag_slots(vif, offset, size, &state); -	} -	return count; -} -  struct netrx_pending_operations {  	unsigned copy_prod, copy_cons;  	unsigned meta_prod, meta_cons; @@ -557,12 +459,12 @@ struct skb_cb_overlay {  	int meta_slots_used;  }; -static void xenvif_kick_thread(struct xenvif *vif) +void xenvif_kick_thread(struct xenvif *vif)  {  	wake_up(&vif->wq);  } -void xenvif_rx_action(struct xenvif *vif) +static void xenvif_rx_action(struct xenvif *vif)  {  	s8 status;  	u16 flags; @@ -571,11 +473,10 @@ void xenvif_rx_action(struct xenvif *vif)  	struct sk_buff *skb;  	LIST_HEAD(notify);  	int ret; -	int nr_frags; -	int count;  	unsigned long offset;  	struct skb_cb_overlay *sco; -	int need_to_notify = 0; +	bool need_to_notify = false; +	bool ring_full = false;  	struct netrx_pending_operations npo = {  		.copy  = vif->grant_copy_op, @@ -584,38 +485,54 @@ void xenvif_rx_action(struct xenvif *vif)  	skb_queue_head_init(&rxq); -	count = 0; -  	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { -		vif = netdev_priv(skb->dev); -		nr_frags = skb_shinfo(skb)->nr_frags; +		int max_slots_needed; +		int i; + +		/* We need a cheap worse case estimate for the number of +		 * slots we'll use. 
+		 */ + +		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) + +						skb_headlen(skb), +						PAGE_SIZE); +		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { +			unsigned int size; +			size = skb_frag_size(&skb_shinfo(skb)->frags[i]); +			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); +		} +		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || +		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) +			max_slots_needed++; + +		/* If the skb may not fit then bail out now */ +		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) { +			skb_queue_head(&vif->rx_queue, skb); +			need_to_notify = true; +			ring_full = true; +			break; +		}  		sco = (struct skb_cb_overlay *)skb->cb;  		sco->meta_slots_used = xenvif_gop_skb(skb, &npo); - -		count += nr_frags + 1; +		BUG_ON(sco->meta_slots_used > max_slots_needed);  		__skb_queue_tail(&rxq, skb); - -		/* Filled the batch queue? */ -		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ -		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE) -			break;  	}  	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); +	vif->rx_queue_stopped = !npo.copy_prod && ring_full; +  	if (!npo.copy_prod) -		return; +		goto done; -	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); +	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);  	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);  	while ((skb = __skb_dequeue(&rxq)) != NULL) {  		sco = (struct skb_cb_overlay *)skb->cb; -		vif = netdev_priv(skb->dev); -  		if ((1 << vif->meta[npo.meta_cons].gso_type) &  		    vif->gso_prefix_mask) {  			resp = RING_GET_RESPONSE(&vif->rx, @@ -678,28 +595,15 @@ void xenvif_rx_action(struct xenvif *vif)  		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); -		if (ret) -			need_to_notify = 1; - -		xenvif_notify_tx_completion(vif); +		need_to_notify |= !!ret;  		npo.meta_cons += sco->meta_slots_used;  		dev_kfree_skb(skb);  	} +done:  	if (need_to_notify)  		notify_remote_via_irq(vif->rx_irq); - -	/* More work to do? */ -	if (!skb_queue_empty(&vif->rx_queue)) -		xenvif_kick_thread(vif); -} - -void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) -{ -	skb_queue_tail(&vif->rx_queue, skb); - -	xenvif_kick_thread(vif);  }  void xenvif_check_rx_xenvif(struct xenvif *vif) @@ -1141,257 +1045,14 @@ static int xenvif_set_skb_gso(struct xenvif *vif,  	}  	skb_shinfo(skb)->gso_size = gso->u.gso.size; - -	/* Header must be checked, and gso_segs computed. */ -	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; -	skb_shinfo(skb)->gso_segs = 0; - -	return 0; -} - -static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len, -				  unsigned int max) -{ -	if (skb_headlen(skb) >= len) -		return 0; - -	/* If we need to pullup then pullup to the max, so we -	 * won't need to do it again. -	 */ -	if (max > skb->len) -		max = skb->len; - -	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) -		return -ENOMEM; - -	if (skb_headlen(skb) < len) -		return -EPROTO; +	/* gso_segs will be calculated later */  	return 0;  } -/* This value should be large enough to cover a tagged ethernet header plus - * maximally sized IP and TCP or UDP headers. 
- */ -#define MAX_IP_HDR_LEN 128 - -static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, -			     int recalculate_partial_csum) -{ -	unsigned int off; -	bool fragment; -	int err; - -	fragment = false; - -	err = maybe_pull_tail(skb, -			      sizeof(struct iphdr), -			      MAX_IP_HDR_LEN); -	if (err < 0) -		goto out; - -	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) -		fragment = true; - -	off = ip_hdrlen(skb); - -	err = -EPROTO; - -	if (fragment) -		goto out; - -	switch (ip_hdr(skb)->protocol) { -	case IPPROTO_TCP: -		err = maybe_pull_tail(skb, -				      off + sizeof(struct tcphdr), -				      MAX_IP_HDR_LEN); -		if (err < 0) -			goto out; - -		if (!skb_partial_csum_set(skb, off, -					  offsetof(struct tcphdr, check))) -			goto out; - -		if (recalculate_partial_csum) -			tcp_hdr(skb)->check = -				~csum_tcpudp_magic(ip_hdr(skb)->saddr, -						   ip_hdr(skb)->daddr, -						   skb->len - off, -						   IPPROTO_TCP, 0); -		break; -	case IPPROTO_UDP: -		err = maybe_pull_tail(skb, -				      off + sizeof(struct udphdr), -				      MAX_IP_HDR_LEN); -		if (err < 0) -			goto out; - -		if (!skb_partial_csum_set(skb, off, -					  offsetof(struct udphdr, check))) -			goto out; - -		if (recalculate_partial_csum) -			udp_hdr(skb)->check = -				~csum_tcpudp_magic(ip_hdr(skb)->saddr, -						   ip_hdr(skb)->daddr, -						   skb->len - off, -						   IPPROTO_UDP, 0); -		break; -	default: -		goto out; -	} - -	err = 0; - -out: -	return err; -} - -/* This value should be large enough to cover a tagged ethernet header plus - * an IPv6 header, all options, and a maximal TCP or UDP header. - */ -#define MAX_IPV6_HDR_LEN 256 - -#define OPT_HDR(type, skb, off) \ -	(type *)(skb_network_header(skb) + (off)) - -static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, -			       int recalculate_partial_csum) -{ -	int err; -	u8 nexthdr; -	unsigned int off; -	unsigned int len; -	bool fragment; -	bool done; - -	fragment = false; -	done = false; - -	off = sizeof(struct ipv6hdr); - -	err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); -	if (err < 0) -		goto out; - -	nexthdr = ipv6_hdr(skb)->nexthdr; - -	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); -	while (off <= len && !done) { -		switch (nexthdr) { -		case IPPROTO_DSTOPTS: -		case IPPROTO_HOPOPTS: -		case IPPROTO_ROUTING: { -			struct ipv6_opt_hdr *hp; - -			err = maybe_pull_tail(skb, -					      off + -					      sizeof(struct ipv6_opt_hdr), -					      MAX_IPV6_HDR_LEN); -			if (err < 0) -				goto out; - -			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); -			nexthdr = hp->nexthdr; -			off += ipv6_optlen(hp); -			break; -		} -		case IPPROTO_AH: { -			struct ip_auth_hdr *hp; - -			err = maybe_pull_tail(skb, -					      off + -					      sizeof(struct ip_auth_hdr), -					      MAX_IPV6_HDR_LEN); -			if (err < 0) -				goto out; - -			hp = OPT_HDR(struct ip_auth_hdr, skb, off); -			nexthdr = hp->nexthdr; -			off += ipv6_authlen(hp); -			break; -		} -		case IPPROTO_FRAGMENT: { -			struct frag_hdr *hp; - -			err = maybe_pull_tail(skb, -					      off + -					      sizeof(struct frag_hdr), -					      MAX_IPV6_HDR_LEN); -			if (err < 0) -				goto out; - -			hp = OPT_HDR(struct frag_hdr, skb, off); - -			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) -				fragment = true; - -			nexthdr = hp->nexthdr; -			off += sizeof(struct frag_hdr); -			break; -		} -		default: -			done = true; -			break; -		} -	} - -	err = -EPROTO; - -	if (!done || fragment) -		goto out; - -	switch (nexthdr) { -	case IPPROTO_TCP: -		err = 
maybe_pull_tail(skb, -				      off + sizeof(struct tcphdr), -				      MAX_IPV6_HDR_LEN); -		if (err < 0) -			goto out; - -		if (!skb_partial_csum_set(skb, off, -					  offsetof(struct tcphdr, check))) -			goto out; - -		if (recalculate_partial_csum) -			tcp_hdr(skb)->check = -				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, -						 &ipv6_hdr(skb)->daddr, -						 skb->len - off, -						 IPPROTO_TCP, 0); -		break; -	case IPPROTO_UDP: -		err = maybe_pull_tail(skb, -				      off + sizeof(struct udphdr), -				      MAX_IPV6_HDR_LEN); -		if (err < 0) -			goto out; - -		if (!skb_partial_csum_set(skb, off, -					  offsetof(struct udphdr, check))) -			goto out; - -		if (recalculate_partial_csum) -			udp_hdr(skb)->check = -				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, -						 &ipv6_hdr(skb)->daddr, -						 skb->len - off, -						 IPPROTO_UDP, 0); -		break; -	default: -		goto out; -	} - -	err = 0; - -out: -	return err; -} -  static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)  { -	int err = -EPROTO; -	int recalculate_partial_csum = 0; +	bool recalculate_partial_csum = false;  	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy  	 * peers can fail to set NETRXF_csum_blank when sending a GSO @@ -1401,19 +1062,14 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)  	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {  		vif->rx_gso_checksum_fixup++;  		skb->ip_summed = CHECKSUM_PARTIAL; -		recalculate_partial_csum = 1; +		recalculate_partial_csum = true;  	}  	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */  	if (skb->ip_summed != CHECKSUM_PARTIAL)  		return 0; -	if (skb->protocol == htons(ETH_P_IP)) -		err = checksum_setup_ip(vif, skb, recalculate_partial_csum); -	else if (skb->protocol == htons(ETH_P_IPV6)) -		err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum); - -	return err; +	return skb_checksum_setup(skb, recalculate_partial_csum);  }  static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) @@ -1679,6 +1335,20 @@ static int xenvif_tx_submit(struct xenvif *vif)  		skb_probe_transport_header(skb, 0); +		/* If the packet is GSO then we will have just set up the +		 * transport header offset in checksum_setup so it's now +		 * straightforward to calculate gso_segs. 
+		 */ +		if (skb_is_gso(skb)) { +			int mss = skb_shinfo(skb)->gso_size; +			int hdrlen = skb_transport_header(skb) - +				skb_mac_header(skb) + +				tcp_hdrlen(skb); + +			skb_shinfo(skb)->gso_segs = +				DIV_ROUND_UP(skb->len - hdrlen, mss); +		} +  		vif->dev->stats.rx_bytes += skb->len;  		vif->dev->stats.rx_packets++; @@ -1803,7 +1473,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,  static inline int rx_work_todo(struct xenvif *vif)  { -	return !skb_queue_empty(&vif->rx_queue); +	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) || +		vif->rx_event;  }  static inline int tx_work_todo(struct xenvif *vif) @@ -1853,8 +1524,6 @@ int xenvif_map_frontend_rings(struct xenvif *vif,  	rxs = (struct xen_netif_rx_sring *)addr;  	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); -	vif->rx_req_cons_peek = 0; -  	return 0;  err: @@ -1862,9 +1531,24 @@ err:  	return err;  } +void xenvif_stop_queue(struct xenvif *vif) +{ +	if (!vif->can_queue) +		return; + +	netif_stop_queue(vif->dev); +} + +static void xenvif_start_queue(struct xenvif *vif) +{ +	if (xenvif_schedulable(vif)) +		netif_wake_queue(vif->dev); +} +  int xenvif_kthread(void *data)  {  	struct xenvif *vif = data; +	struct sk_buff *skb;  	while (!kthread_should_stop()) {  		wait_event_interruptible(vif->wq, @@ -1873,12 +1557,22 @@ int xenvif_kthread(void *data)  		if (kthread_should_stop())  			break; -		if (rx_work_todo(vif)) +		if (!skb_queue_empty(&vif->rx_queue))  			xenvif_rx_action(vif); +		vif->rx_event = false; + +		if (skb_queue_empty(&vif->rx_queue) && +		    netif_queue_stopped(vif->dev)) +			xenvif_start_queue(vif); +  		cond_resched();  	} +	/* Bin any remaining skbs */ +	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) +		dev_kfree_skb(skb); +  	return 0;  } diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index f0358992b04..7a206cffb06 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -15,8 +15,7 @@   * GNU General Public License for more details.   *   * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA + * along with this program; if not, see <http://www.gnu.org/licenses/>.  */  #include "common.h" | 
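
For reference, the new availability check that replaces xenvif_rx_ring_full() and xenvif_must_stop_queue() can be modelled in isolation. The sketch below is plain userspace C rather than the kernel code: rx_ring_model and the example indices are invented for illustration, and the real function operates on the shared xen_netif_rx_back_ring with an mb() between arming req_event and re-reading req_prod.

/* ring_check.c: userspace model of xenvif_rx_ring_slots_available().
 * struct rx_ring_model is an invented stand-in for the shared Xen RX
 * ring; only the three indices the function touches are modelled.
 */
#include <stdbool.h>
#include <stdio.h>

struct rx_ring_model {
	unsigned int req_prod;  /* advanced by the frontend as it posts buffers */
	unsigned int req_cons;  /* advanced by the backend as it consumes them */
	unsigned int req_event; /* frontend notifies once req_prod passes this */
};

/* If fewer than 'needed' requests are outstanding, arm req_event so the
 * frontend raises an event as soon as more buffers arrive, then re-read
 * req_prod to close the race where buffers were posted in between. */
static bool rx_ring_slots_available(struct rx_ring_model *ring,
				    unsigned int needed)
{
	unsigned int prod, cons;

	do {
		prod = ring->req_prod;
		cons = ring->req_cons;

		if (prod - cons >= needed)
			return true;

		ring->req_event = prod + 1;
		/* the kernel version issues mb() here before re-reading
		 * req_prod */
	} while (ring->req_prod != prod);

	return false;
}

int main(void)
{
	struct rx_ring_model ring = { .req_prod = 12, .req_cons = 10 };

	printf("4 slots available: %s\n",
	       rx_ring_slots_available(&ring, 4) ? "yes" : "no");
	printf("2 slots available: %s\n",
	       rx_ring_slots_available(&ring, 2) ? "yes" : "no");
	return 0;
}

Returning false leaves req_event armed, so the frontend raises the RX interrupt as soon as it posts more buffers; with this patch that interrupt simply sets vif->rx_event and kicks the kthread, which wakes the stopped queue once the backlog drains.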
