Diffstat (limited to 'drivers/net/xen-netback')
 drivers/net/xen-netback/common.h    |  215
 drivers/net/xen-netback/interface.c |  534
 drivers/net/xen-netback/netback.c   | 1880
 drivers/net/xen-netback/xenbus.c    |  185
 4 files changed, 1677 insertions(+), 1137 deletions(-)
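The key data-structure change in common.h below embeds a struct ubuf_info in each pending TX slot, and netback.c recovers the owning queue from a zerocopy callback pointer with nothing but pointer arithmetic (ubuf_to_queue()). Here is a minimal userspace model of that recovery, using stand-in types and a local container_of; the kernel's own ubuf_info and container_of differ in detail.

#include <stddef.h>
#include <stdio.h>

struct ubuf_info { unsigned short desc; };   /* stand-in for the kernel type */
struct pending_tx_info {
        int req;                             /* stand-in for xen_netif_tx_request */
        struct ubuf_info callback_struct;
};

#define MAX_PENDING_REQS 256

struct xenvif_queue {
        struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
        unsigned short pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
                container_of(ubuf, struct pending_tx_info, callback_struct);

        /* desc doubles as the slot index, so stepping back desc elements
         * reaches pending_tx_info[0], and from there the queue. */
        return container_of(temp - pending_idx,
                            struct xenvif_queue, pending_tx_info[0]);
}

int main(void)
{
        static struct xenvif_queue q;
        unsigned short i;

        for (i = 0; i < MAX_PENDING_REQS; i++)
                q.pending_tx_info[i].callback_struct.desc = i;
        /* Any slot's ubuf_info maps back to its owning queue. */
        printf("recovered ok: %d\n",
               ubuf_to_queue(&q.pending_tx_info[42].callback_struct) == &q);
        return 0;
}

Because desc is the slot index, no per-slot back-pointer is needed: the containing queue is computable from the callback pointer's address alone.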
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index c47794b9d42..2532ce85d71 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -48,37 +48,19 @@ typedef unsigned int pending_ring_idx_t; #define INVALID_PENDING_RING_IDX (~0U) -/* For the head field in pending_tx_info: it is used to indicate - * whether this tx info is the head of one or more coalesced requests. - * - * When head != INVALID_PENDING_RING_IDX, it means the start of a new - * tx requests queue and the end of previous queue. - * - * An example sequence of head fields (I = INVALID_PENDING_RING_IDX): - * - * ...|0 I I I|5 I|9 I I I|... - * -->|<-INUSE---------------- - * - * After consuming the first slot(s) we have: - * - * ...|V V V V|5 I|9 I I I|... - * -----FREE->|<-INUSE-------- - * - * where V stands for "valid pending ring index". Any number other - * than INVALID_PENDING_RING_IDX is OK. These entries are considered - * free and can contain any number other than - * INVALID_PENDING_RING_IDX. In practice we use 0. - * - * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the - * above example) number is the index into pending_tx_info and - * mmap_pages arrays. - */ struct pending_tx_info { - struct xen_netif_tx_request req; /* coalesced tx request */ - pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX - * if it is head of one or more tx - * reqs - */ + struct xen_netif_tx_request req; /* tx request */ + /* Callback data for released SKBs. The callback is always + * xenvif_zerocopy_callback, desc contains the pending_idx, which is + * also an index in pending_tx_info array. It is initialized in + * xenvif_alloc and it never changes. + * skb_shinfo(skb)->destructor_arg points to the first mapped slot's + * callback_struct in this array of struct pending_tx_info's, then ctx + * to the next, or NULL if there is no more slot for this skb. + * ubuf_to_vif is a helper which finds the struct xenvif from a pointer + * to this field. + */ + struct ubuf_info callback_struct; }; #define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) @@ -99,7 +81,7 @@ struct xenvif_rx_meta { #define MAX_BUFFER_OFFSET PAGE_SIZE -#define MAX_PENDING_REQS 256 +#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE /* It's possible for an skb to have a maximal number of frags * but still be less than MAX_BUFFER_OFFSET in size. Thus the @@ -108,17 +90,52 @@ struct xenvif_rx_meta { */ #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) -struct xenvif { - /* Unique identifier for this interface. */ - domid_t domid; - unsigned int handle; +#define NETBACK_INVALID_HANDLE -1 + +/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating + * the maximum slots a valid packet can use. Now this value is defined + * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by + * all backend. + */ +#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN + +/* Queue name is interface name with "-qNNN" appended */ +#define QUEUE_NAME_SIZE (IFNAMSIZ + 5) + +/* IRQ name is queue name with "-tx" or "-rx" appended */ +#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) + +struct xenvif; + +struct xenvif_stats { + /* Stats fields to be updated per-queue. + * A subset of struct net_device_stats that contains only the + * fields that are updated in netback.c for each queue. 
+ */ + unsigned int rx_bytes; + unsigned int rx_packets; + unsigned int tx_bytes; + unsigned int tx_packets; + + /* Additional stats used by xenvif */ + unsigned long rx_gso_checksum_fixup; + unsigned long tx_zerocopy_sent; + unsigned long tx_zerocopy_success; + unsigned long tx_zerocopy_fail; + unsigned long tx_frag_overflow; +}; + +struct xenvif_queue { /* Per-queue data for xenvif */ + unsigned int id; /* Queue ID, 0-based */ + char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ + struct xenvif *vif; /* Parent VIF */ /* Use NAPI for guest TX */ struct napi_struct napi; /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ unsigned int tx_irq; /* Only used when feature-split-event-channels = 1 */ - char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */ + char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ struct xen_netif_tx_back_ring tx; struct sk_buff_head tx_queue; struct page *mmap_pages[MAX_PENDING_REQS]; @@ -126,13 +143,27 @@ struct xenvif { pending_ring_idx_t pending_cons; u16 pending_ring[MAX_PENDING_REQS]; struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; - - /* Coalescing tx requests before copying makes number of grant - * copy ops greater or equal to number of slots required. In - * worst case a tx request consumes 2 gnttab_copy. + grant_handle_t grant_tx_handle[MAX_PENDING_REQS]; + + struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS]; + struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS]; + struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS]; + /* passed to gnttab_[un]map_refs with pages under (un)mapping */ + struct page *pages_to_map[MAX_PENDING_REQS]; + struct page *pages_to_unmap[MAX_PENDING_REQS]; + + /* This prevents zerocopy callbacks to race over dealloc_ring */ + spinlock_t callback_lock; + /* This prevents dealloc thread and NAPI instance to race over response + * creation and pending_ring in xenvif_idx_release. In xenvif_tx_err + * it only protect response creation */ - struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS]; - + spinlock_t response_lock; + pending_ring_idx_t dealloc_prod; + pending_ring_idx_t dealloc_cons; + u16 dealloc_ring[MAX_PENDING_REQS]; + struct task_struct *dealloc_task; + wait_queue_head_t dealloc_wq; /* Use kthread for guest RX */ struct task_struct *task; @@ -140,24 +171,37 @@ struct xenvif { /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ unsigned int rx_irq; /* Only used when feature-split-event-channels = 1 */ - char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */ + char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ struct xen_netif_rx_back_ring rx; struct sk_buff_head rx_queue; + RING_IDX rx_last_skb_slots; + bool rx_queue_purge; - /* Allow xenvif_start_xmit() to peek ahead in the rx request - * ring. This is a prediction of what rx_req_cons will be - * once all queued skbs are put on the ring. - */ - RING_IDX rx_req_cons_peek; + struct timer_list wake_queue; - /* This array is allocated seperately as it is large */ - struct gnttab_copy *grant_copy_op; + struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS]; /* We create one meta structure per ring request we consume, so * the maximum number is the same as the ring size. */ struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; + /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ + unsigned long credit_bytes; + unsigned long credit_usec; + unsigned long remaining_credit; + struct timer_list credit_timeout; + u64 credit_window_start; + + /* Statistics */ + struct xenvif_stats stats; +}; + +struct xenvif { + /* Unique identifier for this interface. 
*/ + domid_t domid; + unsigned int handle; + u8 fe_dev_addr[6]; /* Frontend feature information. */ @@ -171,15 +215,14 @@ struct xenvif { /* Internal feature information. */ u8 can_queue:1; /* can queue packets for receiver? */ - /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ - unsigned long credit_bytes; - unsigned long credit_usec; - unsigned long remaining_credit; - struct timer_list credit_timeout; - u64 credit_window_start; + /* Is this interface disabled? True when backend discovers + * frontend is rogue. + */ + bool disabled; - /* Statistics */ - unsigned long rx_gso_checksum_fixup; + /* Queues */ + struct xenvif_queue *queues; + unsigned int num_queues; /* active queues, resource allocated */ /* Miscellaneous private stuff. */ struct net_device *dev; @@ -194,7 +237,10 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, unsigned int handle); -int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, +int xenvif_init_queue(struct xenvif_queue *queue); +void xenvif_deinit_queue(struct xenvif_queue *queue); + +int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, unsigned long rx_ring_ref, unsigned int tx_evtchn, unsigned int rx_evtchn); void xenvif_disconnect(struct xenvif *vif); @@ -205,35 +251,56 @@ void xenvif_xenbus_fini(void); int xenvif_schedulable(struct xenvif *vif); -int xenvif_rx_ring_full(struct xenvif *vif); +int xenvif_must_stop_queue(struct xenvif_queue *queue); -int xenvif_must_stop_queue(struct xenvif *vif); +int xenvif_queue_stopped(struct xenvif_queue *queue); +void xenvif_wake_queue(struct xenvif_queue *queue); /* (Un)Map communication rings. */ -void xenvif_unmap_frontend_rings(struct xenvif *vif); -int xenvif_map_frontend_rings(struct xenvif *vif, +void xenvif_unmap_frontend_rings(struct xenvif_queue *queue); +int xenvif_map_frontend_rings(struct xenvif_queue *queue, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref); /* Check for SKBs from frontend and schedule backend processing */ -void xenvif_check_rx_xenvif(struct xenvif *vif); - -/* Queue an SKB for transmission to the frontend */ -void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); -/* Notify xenvif that ring now has space to send an skb to the frontend */ -void xenvif_notify_tx_completion(struct xenvif *vif); +void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue); /* Prevent the device from generating any further traffic. */ void xenvif_carrier_off(struct xenvif *vif); -/* Returns number of ring slots required to send an skb to the frontend */ -unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); +int xenvif_tx_action(struct xenvif_queue *queue, int budget); + +int xenvif_kthread_guest_rx(void *data); +void xenvif_kick_thread(struct xenvif_queue *queue); -int xenvif_tx_action(struct xenvif *vif, int budget); -void xenvif_rx_action(struct xenvif *vif); +int xenvif_dealloc_kthread(void *data); + +/* Determine whether the needed number of slots (req) are available, + * and set req_event if not. 
+ */ +bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed); -int xenvif_kthread(void *data); +void xenvif_carrier_on(struct xenvif *vif); + +/* Callback from stack when TX packet can be released */ +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); + +/* Unmap a pending page and release it back to the guest */ +void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx); + +static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) +{ + return MAX_PENDING_REQS - + queue->pending_prod + queue->pending_cons; +} + +/* Callback from stack when TX packet can be released */ +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); extern bool separate_tx_rx_irq; +extern unsigned int rx_drain_timeout_msecs; +extern unsigned int rx_drain_timeout_jiffies; +extern unsigned int xenvif_max_queues; + #endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index fff8cddfed8..9e97c7ca0dd 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -38,64 +38,56 @@ #include <xen/events.h> #include <asm/xen/hypercall.h> +#include <xen/balloon.h> #define XENVIF_QUEUE_LENGTH 32 #define XENVIF_NAPI_WEIGHT 64 -int xenvif_schedulable(struct xenvif *vif) +static inline void xenvif_stop_queue(struct xenvif_queue *queue) { - return netif_running(vif->dev) && netif_carrier_ok(vif->dev); + struct net_device *dev = queue->vif->dev; + + if (!queue->vif->can_queue) + return; + + netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); } -static int xenvif_rx_schedulable(struct xenvif *vif) +int xenvif_schedulable(struct xenvif *vif) { - return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif); + return netif_running(vif->dev) && netif_carrier_ok(vif->dev); } static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) { - struct xenvif *vif = dev_id; + struct xenvif_queue *queue = dev_id; - if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) - napi_schedule(&vif->napi); + if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) + napi_schedule(&queue->napi); return IRQ_HANDLED; } -static int xenvif_poll(struct napi_struct *napi, int budget) +int xenvif_poll(struct napi_struct *napi, int budget) { - struct xenvif *vif = container_of(napi, struct xenvif, napi); + struct xenvif_queue *queue = + container_of(napi, struct xenvif_queue, napi); int work_done; - work_done = xenvif_tx_action(vif, budget); + /* This vif is rogue, we pretend we've there is nothing to do + * for this vif to deschedule it from NAPI. But this interface + * will be turned off in thread context later. + */ + if (unlikely(queue->vif->disabled)) { + napi_complete(napi); + return 0; + } + + work_done = xenvif_tx_action(queue, budget); if (work_done < budget) { - int more_to_do = 0; - unsigned long flags; - - /* It is necessary to disable IRQ before calling - * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might - * lose event from the frontend. - * - * Consider: - * RING_HAS_UNCONSUMED_REQUESTS - * <frontend generates event to trigger napi_schedule> - * __napi_complete - * - * This handler is still in scheduled state so the - * event has no effect at all. After __napi_complete - * this handler is descheduled and cannot get - * scheduled again. We lose event in this case and the ring - * will be completely stalled. 
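Both xenvif_rx_ring_slots_available() declared above and the nr_pending_reqs() inline rely on ring indices being free-running unsigned counters: plain subtraction yields the number of outstanding entries even after the counters wrap. A standalone C illustration of that arithmetic:

#include <stdio.h>

int main(void)
{
        /* Free-running indices: only ever incremented, never masked. */
        unsigned int prod = 0xfffffffeu;   /* producer, about to wrap */
        unsigned int cons = 0xfffffffau;   /* consumer, 4 behind */

        prod += 6;                         /* wraps past zero to 4 */
        printf("unconsumed = %u\n", prod - cons);   /* prints 10 */
        return 0;
}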
- */ - - local_irq_save(flags); - - RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); - if (!more_to_do) - __napi_complete(napi); - - local_irq_restore(flags); + napi_complete(napi); + xenvif_napi_schedule_or_enable_events(queue); } return work_done; @@ -103,10 +95,9 @@ static int xenvif_poll(struct napi_struct *napi, int budget) static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) { - struct xenvif *vif = dev_id; + struct xenvif_queue *queue = dev_id; - if (xenvif_rx_schedulable(vif)) - netif_wake_queue(vif->dev); + xenvif_kick_thread(queue); return IRQ_HANDLED; } @@ -119,27 +110,87 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +int xenvif_queue_stopped(struct xenvif_queue *queue) +{ + struct net_device *dev = queue->vif->dev; + unsigned int id = queue->id; + return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id)); +} + +void xenvif_wake_queue(struct xenvif_queue *queue) +{ + struct net_device *dev = queue->vif->dev; + unsigned int id = queue->id; + netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); +} + +/* Callback to wake the queue and drain it on timeout */ +static void xenvif_wake_queue_callback(unsigned long data) +{ + struct xenvif_queue *queue = (struct xenvif_queue *)data; + + if (xenvif_queue_stopped(queue)) { + netdev_err(queue->vif->dev, "draining TX queue\n"); + queue->rx_queue_purge = true; + xenvif_kick_thread(queue); + xenvif_wake_queue(queue); + } +} + static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); + struct xenvif_queue *queue = NULL; + unsigned int num_queues = vif->num_queues; + u16 index; + int min_slots_needed; BUG_ON(skb->dev != dev); - /* Drop the packet if vif is not ready */ - if (vif->task == NULL) + /* Drop the packet if queues are not set up */ + if (num_queues < 1) goto drop; - /* Drop the packet if the target domain has no receive buffers. */ - if (!xenvif_rx_schedulable(vif)) + /* Obtain the queue to be used to transmit this packet */ + index = skb_get_queue_mapping(skb); + if (index >= num_queues) { + pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.", + index, vif->dev->name); + index %= num_queues; + } + queue = &vif->queues[index]; + + /* Drop the packet if queue is not ready */ + if (queue->task == NULL || + queue->dealloc_task == NULL || + !xenvif_schedulable(vif)) goto drop; - /* Reserve ring slots for the worst-case number of fragments. */ - vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb); + /* At best we'll need one slot for the header and one for each + * frag. + */ + min_slots_needed = 1 + skb_shinfo(skb)->nr_frags; - if (vif->can_queue && xenvif_must_stop_queue(vif)) - netif_stop_queue(dev); + /* If the skb is GSO then we'll also need an extra slot for the + * metadata. + */ + if (skb_is_gso(skb)) + min_slots_needed++; + + /* If the skb can't possibly fit in the remaining slots + * then turn off the queue to give the ring a chance to + * drain. 
+ */ + if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) { + queue->wake_queue.function = xenvif_wake_queue_callback; + queue->wake_queue.data = (unsigned long)queue; + xenvif_stop_queue(queue); + mod_timer(&queue->wake_queue, + jiffies + rx_drain_timeout_jiffies); + } - xenvif_queue_tx_skb(vif, skb); + skb_queue_tail(&queue->rx_queue, skb); + xenvif_kick_thread(queue); return NETDEV_TX_OK; @@ -149,34 +200,68 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -void xenvif_notify_tx_completion(struct xenvif *vif) -{ - if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif)) - netif_wake_queue(vif->dev); -} - static struct net_device_stats *xenvif_get_stats(struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); + struct xenvif_queue *queue = NULL; + unsigned int num_queues = vif->num_queues; + unsigned long rx_bytes = 0; + unsigned long rx_packets = 0; + unsigned long tx_bytes = 0; + unsigned long tx_packets = 0; + unsigned int index; + + if (vif->queues == NULL) + goto out; + + /* Aggregate tx and rx stats from each queue */ + for (index = 0; index < num_queues; ++index) { + queue = &vif->queues[index]; + rx_bytes += queue->stats.rx_bytes; + rx_packets += queue->stats.rx_packets; + tx_bytes += queue->stats.tx_bytes; + tx_packets += queue->stats.tx_packets; + } + +out: + vif->dev->stats.rx_bytes = rx_bytes; + vif->dev->stats.rx_packets = rx_packets; + vif->dev->stats.tx_bytes = tx_bytes; + vif->dev->stats.tx_packets = tx_packets; + return &vif->dev->stats; } static void xenvif_up(struct xenvif *vif) { - napi_enable(&vif->napi); - enable_irq(vif->tx_irq); - if (vif->tx_irq != vif->rx_irq) - enable_irq(vif->rx_irq); - xenvif_check_rx_xenvif(vif); + struct xenvif_queue *queue = NULL; + unsigned int num_queues = vif->num_queues; + unsigned int queue_index; + + for (queue_index = 0; queue_index < num_queues; ++queue_index) { + queue = &vif->queues[queue_index]; + napi_enable(&queue->napi); + enable_irq(queue->tx_irq); + if (queue->tx_irq != queue->rx_irq) + enable_irq(queue->rx_irq); + xenvif_napi_schedule_or_enable_events(queue); + } } static void xenvif_down(struct xenvif *vif) { - napi_disable(&vif->napi); - disable_irq(vif->tx_irq); - if (vif->tx_irq != vif->rx_irq) - disable_irq(vif->rx_irq); - del_timer_sync(&vif->credit_timeout); + struct xenvif_queue *queue = NULL; + unsigned int num_queues = vif->num_queues; + unsigned int queue_index; + + for (queue_index = 0; queue_index < num_queues; ++queue_index) { + queue = &vif->queues[queue_index]; + napi_disable(&queue->napi); + disable_irq(queue->tx_irq); + if (queue->tx_irq != queue->rx_irq) + disable_irq(queue->rx_irq); + del_timer_sync(&queue->credit_timeout); + } } static int xenvif_open(struct net_device *dev) @@ -184,7 +269,7 @@ static int xenvif_open(struct net_device *dev) struct xenvif *vif = netdev_priv(dev); if (netif_carrier_ok(dev)) xenvif_up(vif); - netif_start_queue(dev); + netif_tx_start_all_queues(dev); return 0; } @@ -193,7 +278,7 @@ static int xenvif_close(struct net_device *dev) struct xenvif *vif = netdev_priv(dev); if (netif_carrier_ok(dev)) xenvif_down(vif); - netif_stop_queue(dev); + netif_tx_stop_all_queues(dev); return 0; } @@ -233,7 +318,29 @@ static const struct xenvif_stat { } xenvif_stats[] = { { "rx_gso_checksum_fixup", - offsetof(struct xenvif, rx_gso_checksum_fixup) + offsetof(struct xenvif_stats, rx_gso_checksum_fixup) + }, + /* If (sent != success + fail), there are probably packets never + * freed up properly! 
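xenvif_start_xmit() above sizes its admission check with a cheap worst-case bound rather than the exact slot count the removed xenvif_count_skb_slots() used to compute. A compact restatement of that bound, with fake_skb as a stand-in for the fields the real code reads via skb_shinfo():

#include <stdio.h>

struct fake_skb {
        int nr_frags;   /* number of paged fragments */
        int is_gso;     /* needs a GSO metadata slot? */
};

/* One slot for the linear header, one per frag, one more for GSO. */
static int min_slots_needed(const struct fake_skb *skb)
{
        return 1 + skb->nr_frags + (skb->is_gso ? 1 : 0);
}

int main(void)
{
        struct fake_skb skb = { 3, 1 };
        printf("%d\n", min_slots_needed(&skb));   /* prints 5 */
        return 0;
}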
+ */ + { + "tx_zerocopy_sent", + offsetof(struct xenvif_stats, tx_zerocopy_sent), + }, + { + "tx_zerocopy_success", + offsetof(struct xenvif_stats, tx_zerocopy_success), + }, + { + "tx_zerocopy_fail", + offsetof(struct xenvif_stats, tx_zerocopy_fail) + }, + /* Number of packets exceeding MAX_SKB_FRAG slots. You should use + * a guest with the same MAX_SKB_FRAG + */ + { + "tx_frag_overflow", + offsetof(struct xenvif_stats, tx_frag_overflow) }, }; @@ -250,11 +357,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set) static void xenvif_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) { - void *vif = netdev_priv(dev); + struct xenvif *vif = netdev_priv(dev); + unsigned int num_queues = vif->num_queues; int i; - - for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) - data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset); + unsigned int queue_index; + struct xenvif_stats *vif_stats; + + for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { + unsigned long accum = 0; + for (queue_index = 0; queue_index < num_queues; ++queue_index) { + vif_stats = &vif->queues[queue_index].stats; + accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); + } + data[i] = accum; + } } static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) @@ -296,10 +412,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, struct net_device *dev; struct xenvif *vif; char name[IFNAMSIZ] = {}; - int i; snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); - dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup); + /* Allocate a netdev with the max. supported number of queues. + * When the guest selects the desired number, it will be updated + * via netif_set_real_num_*_queues(). + */ + dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup, + xenvif_max_queues); if (dev == NULL) { pr_warn("Could not allocate netdev for %s\n", name); return ERR_PTR(-ENOMEM); @@ -309,44 +429,26 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, vif = netdev_priv(dev); - vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) * - MAX_GRANT_COPY_OPS); - if (vif->grant_copy_op == NULL) { - pr_warn("Could not allocate grant copy space for %s\n", name); - free_netdev(dev); - return ERR_PTR(-ENOMEM); - } - vif->domid = domid; vif->handle = handle; vif->can_sg = 1; vif->ip_csum = 1; vif->dev = dev; + vif->disabled = false; - vif->credit_bytes = vif->remaining_credit = ~0UL; - vif->credit_usec = 0UL; - init_timer(&vif->credit_timeout); - vif->credit_window_start = get_jiffies_64(); + /* Start out with no queues. */ + vif->queues = NULL; + vif->num_queues = 0; dev->netdev_ops = &xenvif_netdev_ops; dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; dev->features = dev->hw_features | NETIF_F_RXCSUM; - SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops); + dev->ethtool_ops = &xenvif_ethtool_ops; dev->tx_queue_len = XENVIF_QUEUE_LENGTH; - skb_queue_head_init(&vif->rx_queue); - skb_queue_head_init(&vif->tx_queue); - - vif->pending_cons = 0; - vif->pending_prod = MAX_PENDING_REQS; - for (i = 0; i < MAX_PENDING_REQS; i++) - vif->pending_ring[i] = i; - for (i = 0; i < MAX_PENDING_REQS; i++) - vif->mmap_pages[i] = NULL; - /* * Initialise a dummy MAC address. 
We choose the numerically * largest non-broadcast address to prevent the address getting @@ -356,8 +458,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, memset(dev->dev_addr, 0xFF, ETH_ALEN); dev->dev_addr[0] &= ~0x01; - netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT); - netif_carrier_off(dev); err = register_netdev(dev); @@ -374,84 +474,147 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, return vif; } -int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, +int xenvif_init_queue(struct xenvif_queue *queue) +{ + int err, i; + + queue->credit_bytes = queue->remaining_credit = ~0UL; + queue->credit_usec = 0UL; + init_timer(&queue->credit_timeout); + queue->credit_window_start = get_jiffies_64(); + + skb_queue_head_init(&queue->rx_queue); + skb_queue_head_init(&queue->tx_queue); + + queue->pending_cons = 0; + queue->pending_prod = MAX_PENDING_REQS; + for (i = 0; i < MAX_PENDING_REQS; ++i) + queue->pending_ring[i] = i; + + spin_lock_init(&queue->callback_lock); + spin_lock_init(&queue->response_lock); + + /* If ballooning is disabled, this will consume real memory, so you + * better enable it. The long term solution would be to use just a + * bunch of valid page descriptors, without dependency on ballooning + */ + err = alloc_xenballooned_pages(MAX_PENDING_REQS, + queue->mmap_pages, + false); + if (err) { + netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); + return -ENOMEM; + } + + for (i = 0; i < MAX_PENDING_REQS; i++) { + queue->pending_tx_info[i].callback_struct = (struct ubuf_info) + { .callback = xenvif_zerocopy_callback, + .ctx = NULL, + .desc = i }; + queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; + } + + init_timer(&queue->wake_queue); + + netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, + XENVIF_NAPI_WEIGHT); + + return 0; +} + +void xenvif_carrier_on(struct xenvif *vif) +{ + rtnl_lock(); + if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) + dev_set_mtu(vif->dev, ETH_DATA_LEN); + netdev_update_features(vif->dev); + netif_carrier_on(vif->dev); + if (netif_running(vif->dev)) + xenvif_up(vif); + rtnl_unlock(); +} + +int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, unsigned long rx_ring_ref, unsigned int tx_evtchn, unsigned int rx_evtchn) { struct task_struct *task; int err = -ENOMEM; - BUG_ON(vif->tx_irq); - BUG_ON(vif->task); + BUG_ON(queue->tx_irq); + BUG_ON(queue->task); + BUG_ON(queue->dealloc_task); - err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); + err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref); if (err < 0) goto err; + init_waitqueue_head(&queue->wq); + init_waitqueue_head(&queue->dealloc_wq); + if (tx_evtchn == rx_evtchn) { /* feature-split-event-channels == 0 */ err = bind_interdomain_evtchn_to_irqhandler( - vif->domid, tx_evtchn, xenvif_interrupt, 0, - vif->dev->name, vif); + queue->vif->domid, tx_evtchn, xenvif_interrupt, 0, + queue->name, queue); if (err < 0) goto err_unmap; - vif->tx_irq = vif->rx_irq = err; - disable_irq(vif->tx_irq); + queue->tx_irq = queue->rx_irq = err; + disable_irq(queue->tx_irq); } else { /* feature-split-event-channels == 1 */ - snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name), - "%s-tx", vif->dev->name); + snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), + "%s-tx", queue->name); err = bind_interdomain_evtchn_to_irqhandler( - vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, - vif->tx_irq_name, vif); + queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, + queue->tx_irq_name, queue); 
if (err < 0) goto err_unmap; - vif->tx_irq = err; - disable_irq(vif->tx_irq); + queue->tx_irq = err; + disable_irq(queue->tx_irq); - snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name), - "%s-rx", vif->dev->name); + snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), + "%s-rx", queue->name); err = bind_interdomain_evtchn_to_irqhandler( - vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, - vif->rx_irq_name, vif); + queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, + queue->rx_irq_name, queue); if (err < 0) goto err_tx_unbind; - vif->rx_irq = err; - disable_irq(vif->rx_irq); + queue->rx_irq = err; + disable_irq(queue->rx_irq); } - init_waitqueue_head(&vif->wq); - task = kthread_create(xenvif_kthread, - (void *)vif, "%s", vif->dev->name); + task = kthread_create(xenvif_kthread_guest_rx, + (void *)queue, "%s-guest-rx", queue->name); if (IS_ERR(task)) { - pr_warn("Could not allocate kthread for %s\n", vif->dev->name); + pr_warn("Could not allocate kthread for %s\n", queue->name); err = PTR_ERR(task); goto err_rx_unbind; } + queue->task = task; - vif->task = task; - - rtnl_lock(); - if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) - dev_set_mtu(vif->dev, ETH_DATA_LEN); - netdev_update_features(vif->dev); - netif_carrier_on(vif->dev); - if (netif_running(vif->dev)) - xenvif_up(vif); - rtnl_unlock(); + task = kthread_create(xenvif_dealloc_kthread, + (void *)queue, "%s-dealloc", queue->name); + if (IS_ERR(task)) { + pr_warn("Could not allocate kthread for %s\n", queue->name); + err = PTR_ERR(task); + goto err_rx_unbind; + } + queue->dealloc_task = task; - wake_up_process(vif->task); + wake_up_process(queue->task); + wake_up_process(queue->dealloc_task); return 0; err_rx_unbind: - unbind_from_irqhandler(vif->rx_irq, vif); - vif->rx_irq = 0; + unbind_from_irqhandler(queue->rx_irq, queue); + queue->rx_irq = 0; err_tx_unbind: - unbind_from_irqhandler(vif->tx_irq, vif); - vif->tx_irq = 0; + unbind_from_irqhandler(queue->tx_irq, queue); + queue->tx_irq = 0; err_unmap: - xenvif_unmap_frontend_rings(vif); + xenvif_unmap_frontend_rings(queue); err: module_put(THIS_MODULE); return err; @@ -468,36 +631,103 @@ void xenvif_carrier_off(struct xenvif *vif) rtnl_unlock(); } +static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue, + unsigned int worst_case_skb_lifetime) +{ + int i, unmap_timeout = 0; + + for (i = 0; i < MAX_PENDING_REQS; ++i) { + if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) { + unmap_timeout++; + schedule_timeout(msecs_to_jiffies(1000)); + if (unmap_timeout > worst_case_skb_lifetime && + net_ratelimit()) + netdev_err(queue->vif->dev, + "Page still granted! 
Index: %x\n", + i); + i = -1; + } + } +} + void xenvif_disconnect(struct xenvif *vif) { + struct xenvif_queue *queue = NULL; + unsigned int num_queues = vif->num_queues; + unsigned int queue_index; + if (netif_carrier_ok(vif->dev)) xenvif_carrier_off(vif); - if (vif->task) { - kthread_stop(vif->task); - vif->task = NULL; - } + for (queue_index = 0; queue_index < num_queues; ++queue_index) { + queue = &vif->queues[queue_index]; + + if (queue->task) { + del_timer_sync(&queue->wake_queue); + kthread_stop(queue->task); + queue->task = NULL; + } - if (vif->tx_irq) { - if (vif->tx_irq == vif->rx_irq) - unbind_from_irqhandler(vif->tx_irq, vif); - else { - unbind_from_irqhandler(vif->tx_irq, vif); - unbind_from_irqhandler(vif->rx_irq, vif); + if (queue->dealloc_task) { + kthread_stop(queue->dealloc_task); + queue->dealloc_task = NULL; } - vif->tx_irq = 0; + + if (queue->tx_irq) { + if (queue->tx_irq == queue->rx_irq) + unbind_from_irqhandler(queue->tx_irq, queue); + else { + unbind_from_irqhandler(queue->tx_irq, queue); + unbind_from_irqhandler(queue->rx_irq, queue); + } + queue->tx_irq = 0; + } + + xenvif_unmap_frontend_rings(queue); } +} - xenvif_unmap_frontend_rings(vif); +/* Reverse the relevant parts of xenvif_init_queue(). + * Used for queue teardown from xenvif_free(), and on the + * error handling paths in xenbus.c:connect(). + */ +void xenvif_deinit_queue(struct xenvif_queue *queue) +{ + free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages); + netif_napi_del(&queue->napi); } void xenvif_free(struct xenvif *vif) { - netif_napi_del(&vif->napi); + struct xenvif_queue *queue = NULL; + unsigned int num_queues = vif->num_queues; + unsigned int queue_index; + /* Here we want to avoid timeout messages if an skb can be legitimately + * stuck somewhere else. Realistically this could be an another vif's + * internal or QDisc queue. That another vif also has this + * rx_drain_timeout_msecs timeout, but the timer only ditches the + * internal queue. After that, the QDisc queue can put in worst case + * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's + * internal queue, so we need several rounds of such timeouts until we + * can be sure that no another vif should have skb's from us. We are + * not sending more skb's, so newly stuck packets are not interesting + * for us here. 
+ */ + unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) * + DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS)); unregister_netdev(vif->dev); - vfree(vif->grant_copy_op); + for (queue_index = 0; queue_index < num_queues; ++queue_index) { + queue = &vif->queues[queue_index]; + xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime); + xenvif_deinit_queue(queue); + } + + vfree(vif->queues); + vif->queues = NULL; + vif->num_queues = 0; + free_netdev(vif->dev); module_put(THIS_MODULE); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 78425554a53..c65b636bcab 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -37,9 +37,9 @@ #include <linux/kthread.h> #include <linux/if_vlan.h> #include <linux/udp.h> +#include <linux/highmem.h> #include <net/tcp.h> -#include <net/ip6_checksum.h> #include <xen/xen.h> #include <xen/events.h> @@ -55,6 +55,18 @@ bool separate_tx_rx_irq = 1; module_param(separate_tx_rx_irq, bool, 0644); +/* When guest ring is filled up, qdisc queues the packets for us, but we have + * to timeout them, otherwise other guests' packets can get stuck there + */ +unsigned int rx_drain_timeout_msecs = 10000; +module_param(rx_drain_timeout_msecs, uint, 0444); +unsigned int rx_drain_timeout_jiffies; + +unsigned int xenvif_max_queues; +module_param_named(max_queues, xenvif_max_queues, uint, 0644); +MODULE_PARM_DESC(max_queues, + "Maximum number of queues per virtual interface"); + /* * This is the maximum slots a skb can have. If a guest sends a skb * which exceeds this limit it is considered malicious. @@ -63,51 +75,48 @@ module_param(separate_tx_rx_irq, bool, 0644); static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; module_param(fatal_skb_slots, uint, 0444); -/* - * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating - * the maximum slots a valid packet can use. Now this value is defined - * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by - * all backend. - */ -#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN - -/* - * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of - * one or more merged tx requests, otherwise it is the continuation of - * previous tx request. 
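The worst_case_skb_lifetime formula above works out to a small concrete number. A worked example, assuming 4 KiB pages (giving a 256-entry RX ring) and MAX_SKB_FRAGS = 17; both values vary by configuration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int rx_drain_timeout_msecs = 10000;  /* module default above */
        unsigned int queue_length = 32;               /* XENVIF_QUEUE_LENGTH */
        unsigned int rx_ring_size = 256;              /* assumed, 4 KiB pages */
        unsigned int max_skb_frags = 17;              /* assumed */

        /* 256 / 17 = 15; DIV_ROUND_UP(32, 15) = 3; 10 * 3 = 30 seconds. */
        unsigned int lifetime = (rx_drain_timeout_msecs / 1000) *
                DIV_ROUND_UP(queue_length, rx_ring_size / max_skb_frags);
        printf("worst_case_skb_lifetime = %u s\n", lifetime);
        return 0;
}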
- */ -static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx) -{ - return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX; -} - -static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, +static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, u8 status); -static void make_tx_response(struct xenvif *vif, +static void make_tx_response(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, s8 st); -static inline int tx_work_todo(struct xenvif *vif); -static inline int rx_work_todo(struct xenvif *vif); +static inline int tx_work_todo(struct xenvif_queue *queue); +static inline int rx_work_todo(struct xenvif_queue *queue); -static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, +static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, u16 id, s8 st, u16 offset, u16 size, u16 flags); -static inline unsigned long idx_to_pfn(struct xenvif *vif, +static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, u16 idx) { - return page_to_pfn(vif->mmap_pages[idx]); + return page_to_pfn(queue->mmap_pages[idx]); } -static inline unsigned long idx_to_kaddr(struct xenvif *vif, +static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, u16 idx) { - return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx)); + return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); +} + +#define callback_param(vif, pending_idx) \ + (vif->pending_tx_info[pending_idx].callback_struct) + +/* Find the containing VIF's structure from a pointer in pending_tx_info array + */ +static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf) +{ + u16 pending_idx = ubuf->desc; + struct pending_tx_info *temp = + container_of(ubuf, struct pending_tx_info, callback_struct); + return container_of(temp - pending_idx, + struct xenvif_queue, + pending_tx_info[0]); } /* This is a miniumum size for the linear area to avoid lots of @@ -132,42 +141,26 @@ static inline pending_ring_idx_t pending_index(unsigned i) return i & (MAX_PENDING_REQS-1); } -static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif) -{ - return MAX_PENDING_REQS - - vif->pending_prod + vif->pending_cons; -} - -static int max_required_rx_slots(struct xenvif *vif) +bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed) { - int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); - - /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ - if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask) - max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ - - return max; -} + RING_IDX prod, cons; -int xenvif_rx_ring_full(struct xenvif *vif) -{ - RING_IDX peek = vif->rx_req_cons_peek; - RING_IDX needed = max_required_rx_slots(vif); + do { + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; - return ((vif->rx.sring->req_prod - peek) < needed) || - ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed); -} + if (prod - cons >= needed) + return true; -int xenvif_must_stop_queue(struct xenvif *vif) -{ - if (!xenvif_rx_ring_full(vif)) - return 0; + queue->rx.sring->req_event = prod + 1; - vif->rx.sring->req_event = vif->rx_req_cons_peek + - max_required_rx_slots(vif); - mb(); /* request notification /then/ check the queue */ + /* Make sure event is visible before we check prod + * again. 
+ */ + mb(); + } while (queue->rx.sring->req_prod != prod); - return xenvif_rx_ring_full(vif); + return false; } /* @@ -175,7 +168,8 @@ int xenvif_must_stop_queue(struct xenvif *vif) * adding 'size' bytes to a buffer which currently contains 'offset' * bytes. */ -static bool start_new_rx_buffer(int offset, unsigned long size, int head) +static bool start_new_rx_buffer(int offset, unsigned long size, int head, + bool full_coalesce) { /* simple case: we have completely filled the current buffer. */ if (offset == MAX_BUFFER_OFFSET) @@ -187,6 +181,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head) * (i) this frag would fit completely in the next buffer * and (ii) there is already some data in the current buffer * and (iii) this is not the head buffer. + * and (iv) there is no need to fully utilize the buffers * * Where: * - (i) stops us splitting a frag into two copies @@ -197,106 +192,22 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head) * by (ii) but is explicitly checked because * netfront relies on the first buffer being * non-empty and can crash otherwise. + * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS + * slot * * This means we will effectively linearise small * frags but do not needlessly split large buffers * into multiple copies tend to give large frags their * own buffers as before. */ - if ((offset + size > MAX_BUFFER_OFFSET) && - (size <= MAX_BUFFER_OFFSET) && offset && !head) + BUG_ON(size > MAX_BUFFER_OFFSET); + if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head && + !full_coalesce) return true; return false; } -struct xenvif_count_slot_state { - unsigned long copy_off; - bool head; -}; - -unsigned int xenvif_count_frag_slots(struct xenvif *vif, - unsigned long offset, unsigned long size, - struct xenvif_count_slot_state *state) -{ - unsigned count = 0; - - offset &= ~PAGE_MASK; - - while (size > 0) { - unsigned long bytes; - - bytes = PAGE_SIZE - offset; - - if (bytes > size) - bytes = size; - - if (start_new_rx_buffer(state->copy_off, bytes, state->head)) { - count++; - state->copy_off = 0; - } - - if (state->copy_off + bytes > MAX_BUFFER_OFFSET) - bytes = MAX_BUFFER_OFFSET - state->copy_off; - - state->copy_off += bytes; - - offset += bytes; - size -= bytes; - - if (offset == PAGE_SIZE) - offset = 0; - - state->head = false; - } - - return count; -} - -/* - * Figure out how many ring slots we're going to need to send @skb to - * the guest. This function is essentially a dry run of - * xenvif_gop_frag_copy. - */ -unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) -{ - struct xenvif_count_slot_state state; - unsigned int count; - unsigned char *data; - unsigned i; - - state.head = true; - state.copy_off = 0; - - /* Slot for the first (partial) page of data. */ - count = 1; - - /* Need a slot for the GSO prefix for GSO extra data? 
*/ - if (skb_shinfo(skb)->gso_size) - count++; - - data = skb->data; - while (data < skb_tail_pointer(skb)) { - unsigned long offset = offset_in_page(data); - unsigned long size = PAGE_SIZE - offset; - - if (data + size > skb_tail_pointer(skb)) - size = skb_tail_pointer(skb) - data; - - count += xenvif_count_frag_slots(vif, offset, size, &state); - - data += size; - } - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); - unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; - - count += xenvif_count_frag_slots(vif, offset, size, &state); - } - return count; -} - struct netrx_pending_operations { unsigned copy_prod, copy_cons; unsigned meta_prod, meta_cons; @@ -306,13 +217,13 @@ struct netrx_pending_operations { grant_ref_t copy_gref; }; -static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, +static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, struct netrx_pending_operations *npo) { struct xenvif_rx_meta *meta; struct xen_netif_rx_request *req; - req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); + req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; @@ -326,19 +237,28 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, return meta; } +struct xenvif_rx_cb { + int meta_slots_used; + bool full_coalesce; +}; + +#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) + /* * Set up the grant operations for this fragment. If it's a flipping * interface, we also set up the unmap request from here. */ -static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, +static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, struct netrx_pending_operations *npo, struct page *page, unsigned long size, - unsigned long offset, int *head) + unsigned long offset, int *head, + struct xenvif_queue *foreign_queue, + grant_ref_t foreign_gref) { struct gnttab_copy *copy_gop; struct xenvif_rx_meta *meta; unsigned long bytes; - int gso_type; + int gso_type = XEN_NETIF_GSO_TYPE_NONE; /* Data must not cross a page boundary. */ BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); @@ -358,14 +278,17 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, if (bytes > size) bytes = size; - if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { + if (start_new_rx_buffer(npo->copy_off, + bytes, + *head, + XENVIF_RX_CB(skb)->full_coalesce)) { /* * Netfront requires there to be some data in the head * buffer. 
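The split rules (i)-(iv) in start_new_rx_buffer() earlier in this file condense to one predicate: close the current buffer early only if the chunk would cross the buffer boundary, the buffer already holds data, it is not the head buffer, and full coalescing was not requested. A standalone restatement, with MAX_BUFFER_OFFSET = PAGE_SIZE per common.h and size assumed to be at most MAX_BUFFER_OFFSET (the kernel BUG_ONs otherwise):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_OFFSET 4096   /* PAGE_SIZE on x86 */

static bool start_new_rx_buffer(int offset, unsigned long size, int head,
                                bool full_coalesce)
{
        if (offset == MAX_BUFFER_OFFSET)   /* buffer exactly full */
                return true;
        /* (i) chunk would cross the boundary, (ii) buffer is non-empty,
         * (iii) not the head buffer, (iv) no full coalescing wanted. */
        return offset + size > MAX_BUFFER_OFFSET && offset && !head &&
               !full_coalesce;
}

int main(void)
{
        printf("%d\n", start_new_rx_buffer(100, 4000, 0, false));  /* 1 */
        printf("%d\n", start_new_rx_buffer(100, 4000, 1, false));  /* 0: head */
        return 0;
}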
*/ BUG_ON(*head); - meta = get_next_rx_buffer(vif, npo); + meta = get_next_rx_buffer(queue, npo); } if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) @@ -375,11 +298,18 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, copy_gop->flags = GNTCOPY_dest_gref; copy_gop->len = bytes; - copy_gop->source.domid = DOMID_SELF; - copy_gop->source.u.gmfn = virt_to_mfn(page_address(page)); + if (foreign_queue) { + copy_gop->source.domid = foreign_queue->vif->domid; + copy_gop->source.u.ref = foreign_gref; + copy_gop->flags |= GNTCOPY_source_gref; + } else { + copy_gop->source.domid = DOMID_SELF; + copy_gop->source.u.gmfn = + virt_to_mfn(page_address(page)); + } copy_gop->source.offset = offset; - copy_gop->dest.domid = vif->domid; + copy_gop->dest.domid = queue->vif->domid; copy_gop->dest.offset = npo->copy_off; copy_gop->dest.u.ref = npo->copy_gref; @@ -397,15 +327,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, } /* Leave a gap for the GSO descriptor. */ - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) - gso_type = XEN_NETIF_GSO_TYPE_TCPV4; - else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - gso_type = XEN_NETIF_GSO_TYPE_TCPV6; - else - gso_type = XEN_NETIF_GSO_TYPE_NONE; + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; + } - if (*head && ((1 << gso_type) & vif->gso_mask)) - vif->rx.req_cons++; + if (*head && ((1 << gso_type) & queue->vif->gso_mask)) + queue->rx.req_cons++; *head = 0; /* There must be something in this buffer now. */ @@ -413,6 +343,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, } /* + * Find the grant ref for a given frag in a chain of struct ubuf_info's + * skb: the skb itself + * i: the frag's number + * ubuf: a pointer to an element in the chain. It should not be NULL + * + * Returns a pointer to the element in the chain where the page were found. If + * not found, returns NULL. + * See the definition of callback_struct in common.h for more details about + * the chain. + */ +static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb, + const int i, + const struct ubuf_info *ubuf) +{ + struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf); + + do { + u16 pending_idx = ubuf->desc; + + if (skb_shinfo(skb)->frags[i].page.p == + foreign_queue->mmap_pages[pending_idx]) + break; + ubuf = (struct ubuf_info *) ubuf->ctx; + } while (ubuf); + + return ubuf; +} + +/* * Prepare an SKB to be transmitted to the frontend. * * This function is responsible for allocating grant operations, meta @@ -425,7 +384,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, * frontend-side LRO). 
*/ static int xenvif_gop_skb(struct sk_buff *skb, - struct netrx_pending_operations *npo) + struct netrx_pending_operations *npo, + struct xenvif_queue *queue) { struct xenvif *vif = netdev_priv(skb->dev); int nr_frags = skb_shinfo(skb)->nr_frags; @@ -436,37 +396,35 @@ static int xenvif_gop_skb(struct sk_buff *skb, int head = 1; int old_meta_prod; int gso_type; - int gso_size; + const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; + const struct ubuf_info *const head_ubuf = ubuf; old_meta_prod = npo->meta_prod; - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { - gso_type = XEN_NETIF_GSO_TYPE_TCPV4; - gso_size = skb_shinfo(skb)->gso_size; - } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { - gso_type = XEN_NETIF_GSO_TYPE_TCPV6; - gso_size = skb_shinfo(skb)->gso_size; - } else { - gso_type = XEN_NETIF_GSO_TYPE_NONE; - gso_size = 0; + gso_type = XEN_NETIF_GSO_TYPE_NONE; + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; } /* Set up a GSO prefix descriptor, if necessary */ if ((1 << gso_type) & vif->gso_prefix_mask) { - req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); + req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_type = gso_type; - meta->gso_size = gso_size; + meta->gso_size = skb_shinfo(skb)->gso_size; meta->size = 0; meta->id = req->id; } - req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); + req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); meta = npo->meta + npo->meta_prod++; if ((1 << gso_type) & vif->gso_mask) { meta->gso_type = gso_type; - meta->gso_size = gso_size; + meta->gso_size = skb_shinfo(skb)->gso_size; } else { meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; meta->gso_size = 0; @@ -485,17 +443,69 @@ static int xenvif_gop_skb(struct sk_buff *skb, if (data + len > skb_tail_pointer(skb)) len = skb_tail_pointer(skb) - data; - xenvif_gop_frag_copy(vif, skb, npo, - virt_to_page(data), len, offset, &head); + xenvif_gop_frag_copy(queue, skb, npo, + virt_to_page(data), len, offset, &head, + NULL, + 0); data += len; } for (i = 0; i < nr_frags; i++) { - xenvif_gop_frag_copy(vif, skb, npo, + /* This variable also signals whether foreign_gref has a real + * value or not. + */ + struct xenvif_queue *foreign_queue = NULL; + grant_ref_t foreign_gref; + + if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && + (ubuf->callback == &xenvif_zerocopy_callback)) { + const struct ubuf_info *const startpoint = ubuf; + + /* Ideally ubuf points to the chain element which + * belongs to this frag. Or if frags were removed from + * the beginning, then shortly before it. + */ + ubuf = xenvif_find_gref(skb, i, ubuf); + + /* Try again from the beginning of the list, if we + * haven't tried from there. This only makes sense in + * the unlikely event of reordering the original frags. + * For injected local pages it's an unnecessary second + * run. + */ + if (unlikely(!ubuf) && startpoint != head_ubuf) + ubuf = xenvif_find_gref(skb, i, head_ubuf); + + if (likely(ubuf)) { + u16 pending_idx = ubuf->desc; + + foreign_queue = ubuf_to_queue(ubuf); + foreign_gref = + foreign_queue->pending_tx_info[pending_idx].req.gref; + /* Just a safety measure. If this was the last + * element on the list, the for loop will + * iterate again if a local page were added to + * the end. Using head_ubuf here prevents the + * second search on the chain. 
Or the original + * frags changed order, but that's less likely. + * In any way, ubuf shouldn't be NULL. + */ + ubuf = ubuf->ctx ? + (struct ubuf_info *) ubuf->ctx : + head_ubuf; + } else + /* This frag was a local page, added to the + * array after the skb left netback. + */ + ubuf = head_ubuf; + } + xenvif_gop_frag_copy(queue, skb, npo, skb_frag_page(&skb_shinfo(skb)->frags[i]), skb_frag_size(&skb_shinfo(skb)->frags[i]), skb_shinfo(skb)->frags[i].page_offset, - &head); + &head, + foreign_queue, + foreign_queue ? foreign_gref : UINT_MAX); } return npo->meta_prod - old_meta_prod; @@ -527,7 +537,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots, return status; } -static void xenvif_add_frag_responses(struct xenvif *vif, int status, +static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status, struct xenvif_rx_meta *meta, int nr_meta_slots) { @@ -548,21 +558,17 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status, flags = XEN_NETRXF_more_data; offset = 0; - make_rx_response(vif, meta[i].id, status, offset, + make_rx_response(queue, meta[i].id, status, offset, meta[i].size, flags); } } -struct skb_cb_overlay { - int meta_slots_used; -}; - -static void xenvif_kick_thread(struct xenvif *vif) +void xenvif_kick_thread(struct xenvif_queue *queue) { - wake_up(&vif->wq); + wake_up(&queue->wq); } -void xenvif_rx_action(struct xenvif *vif) +static void xenvif_rx_action(struct xenvif_queue *queue) { s8 status; u16 flags; @@ -571,73 +577,115 @@ void xenvif_rx_action(struct xenvif *vif) struct sk_buff *skb; LIST_HEAD(notify); int ret; - int nr_frags; - int count; unsigned long offset; - struct skb_cb_overlay *sco; - int need_to_notify = 0; + bool need_to_notify = false; struct netrx_pending_operations npo = { - .copy = vif->grant_copy_op, - .meta = vif->meta, + .copy = queue->grant_copy_op, + .meta = queue->meta, }; skb_queue_head_init(&rxq); - count = 0; + while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) { + RING_IDX max_slots_needed; + RING_IDX old_req_cons; + RING_IDX ring_slots_used; + int i; - while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { - vif = netdev_priv(skb->dev); - nr_frags = skb_shinfo(skb)->nr_frags; + /* We need a cheap worse case estimate for the number of + * slots we'll use. + */ - sco = (struct skb_cb_overlay *)skb->cb; - sco->meta_slots_used = xenvif_gop_skb(skb, &npo); + max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) + + skb_headlen(skb), + PAGE_SIZE); + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + unsigned int size; + unsigned int offset; - count += nr_frags + 1; + size = skb_frag_size(&skb_shinfo(skb)->frags[i]); + offset = skb_shinfo(skb)->frags[i].page_offset; - __skb_queue_tail(&rxq, skb); + /* For a worse-case estimate we need to factor in + * the fragment page offset as this will affect the + * number of times xenvif_gop_frag_copy() will + * call start_new_rx_buffer(). + */ + max_slots_needed += DIV_ROUND_UP(offset + size, + PAGE_SIZE); + } + + /* To avoid the estimate becoming too pessimal for some + * frontends that limit posted rx requests, cap the estimate + * at MAX_SKB_FRAGS. In this case netback will fully coalesce + * the skb into the provided slots. + */ + if (max_slots_needed > MAX_SKB_FRAGS) { + max_slots_needed = MAX_SKB_FRAGS; + XENVIF_RX_CB(skb)->full_coalesce = true; + } else { + XENVIF_RX_CB(skb)->full_coalesce = false; + } - /* Filled the batch queue? 
*/ - /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ - if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE) + /* We may need one more slot for GSO metadata */ + if (skb_is_gso(skb) && + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || + skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) + max_slots_needed++; + + /* If the skb may not fit then bail out now */ + if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) { + skb_queue_head(&queue->rx_queue, skb); + need_to_notify = true; + queue->rx_last_skb_slots = max_slots_needed; break; + } else + queue->rx_last_skb_slots = 0; + + old_req_cons = queue->rx.req_cons; + XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); + ring_slots_used = queue->rx.req_cons - old_req_cons; + + BUG_ON(ring_slots_used > max_slots_needed); + + __skb_queue_tail(&rxq, skb); } - BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); + BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta)); if (!npo.copy_prod) - return; + goto done; BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); - gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); + gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); while ((skb = __skb_dequeue(&rxq)) != NULL) { - sco = (struct skb_cb_overlay *)skb->cb; - - vif = netdev_priv(skb->dev); - if ((1 << vif->meta[npo.meta_cons].gso_type) & - vif->gso_prefix_mask) { - resp = RING_GET_RESPONSE(&vif->rx, - vif->rx.rsp_prod_pvt++); + if ((1 << queue->meta[npo.meta_cons].gso_type) & + queue->vif->gso_prefix_mask) { + resp = RING_GET_RESPONSE(&queue->rx, + queue->rx.rsp_prod_pvt++); resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; - resp->offset = vif->meta[npo.meta_cons].gso_size; - resp->id = vif->meta[npo.meta_cons].id; - resp->status = sco->meta_slots_used; + resp->offset = queue->meta[npo.meta_cons].gso_size; + resp->id = queue->meta[npo.meta_cons].id; + resp->status = XENVIF_RX_CB(skb)->meta_slots_used; npo.meta_cons++; - sco->meta_slots_used--; + XENVIF_RX_CB(skb)->meta_slots_used--; } - vif->dev->stats.tx_bytes += skb->len; - vif->dev->stats.tx_packets++; + queue->stats.tx_bytes += skb->len; + queue->stats.tx_packets++; - status = xenvif_check_gop(vif, sco->meta_slots_used, &npo); + status = xenvif_check_gop(queue->vif, + XENVIF_RX_CB(skb)->meta_slots_used, + &npo); - if (sco->meta_slots_used == 1) + if (XENVIF_RX_CB(skb)->meta_slots_used == 1) flags = 0; else flags = XEN_NETRXF_more_data; @@ -649,22 +697,22 @@ void xenvif_rx_action(struct xenvif *vif) flags |= XEN_NETRXF_data_validated; offset = 0; - resp = make_rx_response(vif, vif->meta[npo.meta_cons].id, + resp = make_rx_response(queue, queue->meta[npo.meta_cons].id, status, offset, - vif->meta[npo.meta_cons].size, + queue->meta[npo.meta_cons].size, flags); - if ((1 << vif->meta[npo.meta_cons].gso_type) & - vif->gso_mask) { + if ((1 << queue->meta[npo.meta_cons].gso_type) & + queue->vif->gso_mask) { struct xen_netif_extra_info *gso = (struct xen_netif_extra_info *) - RING_GET_RESPONSE(&vif->rx, - vif->rx.rsp_prod_pvt++); + RING_GET_RESPONSE(&queue->rx, + queue->rx.rsp_prod_pvt++); resp->flags |= XEN_NETRXF_extra_info; - gso->u.gso.type = vif->meta[npo.meta_cons].gso_type; - gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; + gso->u.gso.type = queue->meta[npo.meta_cons].gso_type; + gso->u.gso.size = queue->meta[npo.meta_cons].gso_size; gso->u.gso.pad = 0; gso->u.gso.features = 0; @@ -672,47 +720,34 @@ void xenvif_rx_action(struct xenvif *vif) gso->flags = 0; } - xenvif_add_frag_responses(vif, status, - vif->meta + npo.meta_cons + 1, - sco->meta_slots_used); + 
xenvif_add_frag_responses(queue, status, + queue->meta + npo.meta_cons + 1, + XENVIF_RX_CB(skb)->meta_slots_used); - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret); - if (ret) - need_to_notify = 1; + need_to_notify |= !!ret; - xenvif_notify_tx_completion(vif); - - npo.meta_cons += sco->meta_slots_used; + npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used; dev_kfree_skb(skb); } +done: if (need_to_notify) - notify_remote_via_irq(vif->rx_irq); - - /* More work to do? */ - if (!skb_queue_empty(&vif->rx_queue)) - xenvif_kick_thread(vif); -} - -void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) -{ - skb_queue_tail(&vif->rx_queue, skb); - - xenvif_kick_thread(vif); + notify_remote_via_irq(queue->rx_irq); } -void xenvif_check_rx_xenvif(struct xenvif *vif) +void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) { int more_to_do; - RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); + RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); if (more_to_do) - napi_schedule(&vif->napi); + napi_schedule(&queue->napi); } -static void tx_add_credit(struct xenvif *vif) +static void tx_add_credit(struct xenvif_queue *queue) { unsigned long max_burst, max_credit; @@ -720,51 +755,57 @@ static void tx_add_credit(struct xenvif *vif) * Allow a burst big enough to transmit a jumbo packet of up to 128kB. * Otherwise the interface can seize up due to insufficient credit. */ - max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; + max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; max_burst = min(max_burst, 131072UL); - max_burst = max(max_burst, vif->credit_bytes); + max_burst = max(max_burst, queue->credit_bytes); /* Take care that adding a new chunk of credit doesn't wrap to zero. 
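The clamp that follows in tx_add_credit() uses the standard unsigned-overflow test: after a + b, the sum is smaller than either operand exactly when the addition wrapped. A self-contained check of the idiom:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned long remaining = ULONG_MAX - 5;
        unsigned long chunk = 100;
        unsigned long max_credit = remaining + chunk;   /* wraps */

        if (max_credit < remaining)        /* wrapped: clamp */
                max_credit = ULONG_MAX;
        printf("%d\n", max_credit == ULONG_MAX);   /* prints 1 */
        return 0;
}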
*/ - max_credit = vif->remaining_credit + vif->credit_bytes; - if (max_credit < vif->remaining_credit) + max_credit = queue->remaining_credit + queue->credit_bytes; + if (max_credit < queue->remaining_credit) max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ - vif->remaining_credit = min(max_credit, max_burst); + queue->remaining_credit = min(max_credit, max_burst); } static void tx_credit_callback(unsigned long data) { - struct xenvif *vif = (struct xenvif *)data; - tx_add_credit(vif); - xenvif_check_rx_xenvif(vif); + struct xenvif_queue *queue = (struct xenvif_queue *)data; + tx_add_credit(queue); + xenvif_napi_schedule_or_enable_events(queue); } -static void xenvif_tx_err(struct xenvif *vif, +static void xenvif_tx_err(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, RING_IDX end) { - RING_IDX cons = vif->tx.req_cons; + RING_IDX cons = queue->tx.req_cons; + unsigned long flags; do { - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); + spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); + spin_unlock_irqrestore(&queue->response_lock, flags); if (cons == end) break; - txp = RING_GET_REQUEST(&vif->tx, cons++); + txp = RING_GET_REQUEST(&queue->tx, cons++); } while (1); - vif->tx.req_cons = cons; + queue->tx.req_cons = cons; } static void xenvif_fatal_tx_err(struct xenvif *vif) { netdev_err(vif->dev, "fatal error; disabling device\n"); - xenvif_carrier_off(vif); + vif->disabled = true; + /* Disable the vif from queue 0's kthread */ + if (vif->queues) + xenvif_kick_thread(&vif->queues[0]); } -static int xenvif_count_requests(struct xenvif *vif, +static int xenvif_count_requests(struct xenvif_queue *queue, struct xen_netif_tx_request *first, struct xen_netif_tx_request *txp, int work_to_do) { - RING_IDX cons = vif->tx.req_cons; + RING_IDX cons = queue->tx.req_cons; int slots = 0; int drop_err = 0; int more_data; @@ -776,10 +817,10 @@ static int xenvif_count_requests(struct xenvif *vif, struct xen_netif_tx_request dropped_tx = { 0 }; if (slots >= work_to_do) { - netdev_err(vif->dev, + netdev_err(queue->vif->dev, "Asked for %d slots but exceeds this limit\n", work_to_do); - xenvif_fatal_tx_err(vif); + xenvif_fatal_tx_err(queue->vif); return -ENODATA; } @@ -787,10 +828,10 @@ static int xenvif_count_requests(struct xenvif *vif, * considered malicious. 
*/ if (unlikely(slots >= fatal_skb_slots)) { - netdev_err(vif->dev, + netdev_err(queue->vif->dev, "Malicious frontend using %d slots, threshold %u\n", slots, fatal_skb_slots); - xenvif_fatal_tx_err(vif); + xenvif_fatal_tx_err(queue->vif); return -E2BIG; } @@ -803,7 +844,7 @@ static int xenvif_count_requests(struct xenvif *vif, */ if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { if (net_ratelimit()) - netdev_dbg(vif->dev, + netdev_dbg(queue->vif->dev, "Too many slots (%d) exceeding limit (%d), dropping packet\n", slots, XEN_NETBK_LEGACY_SLOTS_MAX); drop_err = -E2BIG; @@ -812,7 +853,7 @@ static int xenvif_count_requests(struct xenvif *vif, if (drop_err) txp = &dropped_tx; - memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots), + memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), sizeof(*txp)); /* If the guest submitted a frame >= 64 KiB then @@ -826,7 +867,7 @@ static int xenvif_count_requests(struct xenvif *vif, */ if (!drop_err && txp->size > first->size) { if (net_ratelimit()) - netdev_dbg(vif->dev, + netdev_dbg(queue->vif->dev, "Invalid tx request, slot size %u > remaining size %u\n", txp->size, first->size); drop_err = -EIO; @@ -836,9 +877,9 @@ static int xenvif_count_requests(struct xenvif *vif, slots++; if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { - netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", + netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", txp->offset, txp->size); - xenvif_fatal_tx_err(vif); + xenvif_fatal_tx_err(queue->vif); return -EINVAL; } @@ -850,219 +891,269 @@ static int xenvif_count_requests(struct xenvif *vif, } while (more_data); if (drop_err) { - xenvif_tx_err(vif, first, cons + slots); + xenvif_tx_err(queue, first, cons + slots); return drop_err; } return slots; } -static struct page *xenvif_alloc_page(struct xenvif *vif, - u16 pending_idx) + +struct xenvif_tx_cb { + u16 pending_idx; +}; + +#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) + +static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, + u16 pending_idx, + struct xen_netif_tx_request *txp, + struct gnttab_map_grant_ref *mop) { - struct page *page; + queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; + gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx), + GNTMAP_host_map | GNTMAP_readonly, + txp->gref, queue->vif->domid); + + memcpy(&queue->pending_tx_info[pending_idx].req, txp, + sizeof(*txp)); +} - page = alloc_page(GFP_ATOMIC|__GFP_COLD); - if (!page) +static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) +{ + struct sk_buff *skb = + alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(skb == NULL)) return NULL; - vif->mmap_pages[pending_idx] = page; - return page; + /* Packets passed to netif_rx() must have some headroom. 
*/ + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + + /* Initialize it here to avoid later surprises */ + skb_shinfo(skb)->destructor_arg = NULL; + + return skb; } -static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif, - struct sk_buff *skb, - struct xen_netif_tx_request *txp, - struct gnttab_copy *gop) +static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, + struct sk_buff *skb, + struct xen_netif_tx_request *txp, + struct gnttab_map_grant_ref *gop) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; - u16 pending_idx = *((u16 *)skb->data); - u16 head_idx = 0; - int slot, start; - struct page *page; - pending_ring_idx_t index, start_idx = 0; - uint16_t dst_offset; - unsigned int nr_slots; - struct pending_tx_info *first = NULL; + u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; + int start; + pending_ring_idx_t index; + unsigned int nr_slots, frag_overflow = 0; /* At this point shinfo->nr_frags is in fact the number of * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. */ + if (shinfo->nr_frags > MAX_SKB_FRAGS) { + frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS; + BUG_ON(frag_overflow > MAX_SKB_FRAGS); + shinfo->nr_frags = MAX_SKB_FRAGS; + } nr_slots = shinfo->nr_frags; /* Skip first skb fragment if it is on same page as header fragment. */ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); - /* Coalesce tx requests, at this point the packet passed in - * should be <= 64K. Any packets larger than 64K have been - * handled in xenvif_count_requests(). - */ - for (shinfo->nr_frags = slot = start; slot < nr_slots; - shinfo->nr_frags++) { - struct pending_tx_info *pending_tx_info = - vif->pending_tx_info; - - page = alloc_page(GFP_ATOMIC|__GFP_COLD); - if (!page) - goto err; - - dst_offset = 0; - first = NULL; - while (dst_offset < PAGE_SIZE && slot < nr_slots) { - gop->flags = GNTCOPY_source_gref; - - gop->source.u.ref = txp->gref; - gop->source.domid = vif->domid; - gop->source.offset = txp->offset; - - gop->dest.domid = DOMID_SELF; - - gop->dest.offset = dst_offset; - gop->dest.u.gmfn = virt_to_mfn(page_address(page)); - - if (dst_offset + txp->size > PAGE_SIZE) { - /* This page can only merge a portion - * of tx request. Do not increment any - * pointer / counter here. The txp - * will be dealt with in future - * rounds, eventually hitting the - * `else` branch. - */ - gop->len = PAGE_SIZE - dst_offset; - txp->offset += gop->len; - txp->size -= gop->len; - dst_offset += gop->len; /* quit loop */ - } else { - /* This tx request can be merged in the page */ - gop->len = txp->size; - dst_offset += gop->len; - - index = pending_index(vif->pending_cons++); - - pending_idx = vif->pending_ring[index]; + for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; + shinfo->nr_frags++, txp++, gop++) { + index = pending_index(queue->pending_cons++); + pending_idx = queue->pending_ring[index]; + xenvif_tx_create_map_op(queue, pending_idx, txp, gop); + frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); + } - memcpy(&pending_tx_info[pending_idx].req, txp, - sizeof(*txp)); + if (frag_overflow) { + struct sk_buff *nskb = xenvif_alloc_skb(0); + if (unlikely(nskb == NULL)) { + if (net_ratelimit()) + netdev_err(queue->vif->dev, + "Can't allocate the frag_list skb.\n"); + return NULL; + } - /* Poison these fields, corresponding - * fields for head tx req will be set - * to correct values after the loop. 
- */ - vif->mmap_pages[pending_idx] = (void *)(~0UL); - pending_tx_info[pending_idx].head = - INVALID_PENDING_RING_IDX; - - if (!first) { - first = &pending_tx_info[pending_idx]; - start_idx = index; - head_idx = pending_idx; - } - - txp++; - slot++; - } + shinfo = skb_shinfo(nskb); + frags = shinfo->frags; - gop++; + for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; + shinfo->nr_frags++, txp++, gop++) { + index = pending_index(queue->pending_cons++); + pending_idx = queue->pending_ring[index]; + xenvif_tx_create_map_op(queue, pending_idx, txp, gop); + frag_set_pending_idx(&frags[shinfo->nr_frags], + pending_idx); } - first->req.offset = 0; - first->req.size = dst_offset; - first->head = start_idx; - vif->mmap_pages[head_idx] = page; - frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx); + skb_shinfo(skb)->frag_list = nskb; } - BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS); - return gop; -err: - /* Unwind, freeing all pages and sending error responses. */ - while (shinfo->nr_frags-- > start) { - xenvif_idx_release(vif, - frag_get_pending_idx(&frags[shinfo->nr_frags]), - XEN_NETIF_RSP_ERROR); +} + +static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, + u16 pending_idx, + grant_handle_t handle) +{ + if (unlikely(queue->grant_tx_handle[pending_idx] != + NETBACK_INVALID_HANDLE)) { + netdev_err(queue->vif->dev, + "Trying to overwrite active handle! pending_idx: %x\n", + pending_idx); + BUG(); } - /* The head too, if necessary. */ - if (start) - xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); + queue->grant_tx_handle[pending_idx] = handle; +} - return NULL; +static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue, + u16 pending_idx) +{ + if (unlikely(queue->grant_tx_handle[pending_idx] == + NETBACK_INVALID_HANDLE)) { + netdev_err(queue->vif->dev, + "Trying to unmap invalid handle! pending_idx: %x\n", + pending_idx); + BUG(); + } + queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; } -static int xenvif_tx_check_gop(struct xenvif *vif, +static int xenvif_tx_check_gop(struct xenvif_queue *queue, struct sk_buff *skb, - struct gnttab_copy **gopp) + struct gnttab_map_grant_ref **gopp_map, + struct gnttab_copy **gopp_copy) { - struct gnttab_copy *gop = *gopp; - u16 pending_idx = *((u16 *)skb->data); + struct gnttab_map_grant_ref *gop_map = *gopp_map; + u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; + /* This always points to the shinfo of the skb being checked, which + * could be either the first or the one on the frag_list + */ struct skb_shared_info *shinfo = skb_shinfo(skb); - struct pending_tx_info *tx_info; + /* If this is non-NULL, we are currently checking the frag_list skb, and + * this points to the shinfo of the first one + */ + struct skb_shared_info *first_shinfo = NULL; int nr_frags = shinfo->nr_frags; - int i, err, start; - u16 peek; /* peek into next tx request */ + const bool sharedslot = nr_frags && + frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; + int i, err; /* Check status of header. */ - err = gop->status; - if (unlikely(err)) - xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); - - /* Skip first skb fragment if it is on same page as header fragment. */ - start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); + err = (*gopp_copy)->status; + if (unlikely(err)) { + if (net_ratelimit()) + netdev_dbg(queue->vif->dev, + "Grant copy of header failed! 
status: %d pending_idx: %u ref: %u\n", + (*gopp_copy)->status, + pending_idx, + (*gopp_copy)->source.u.ref); + /* The first frag might still have this slot mapped */ + if (!sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); + } + (*gopp_copy)++; - for (i = start; i < nr_frags; i++) { +check_frags: + for (i = 0; i < nr_frags; i++, gop_map++) { int j, newerr; - pending_ring_idx_t head; pending_idx = frag_get_pending_idx(&shinfo->frags[i]); - tx_info = &vif->pending_tx_info[pending_idx]; - head = tx_info->head; /* Check error status: if okay then remember grant handle. */ - do { - newerr = (++gop)->status; - if (newerr) - break; - peek = vif->pending_ring[pending_index(++head)]; - } while (!pending_tx_is_head(vif, peek)); + newerr = gop_map->status; if (likely(!newerr)) { + xenvif_grant_handle_set(queue, + pending_idx, + gop_map->handle); /* Had a previous error? Invalidate this fragment. */ - if (unlikely(err)) - xenvif_idx_release(vif, pending_idx, - XEN_NETIF_RSP_OKAY); + if (unlikely(err)) { + xenvif_idx_unmap(queue, pending_idx); + /* If the mapping of the first frag was OK, but + * the header's copy failed, and they are + * sharing a slot, send an error + */ + if (i == 0 && sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); + else + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } continue; } /* Error on this fragment: respond to client with an error. */ - xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR); + if (net_ratelimit()) + netdev_dbg(queue->vif->dev, + "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n", + i, + gop_map->status, + pending_idx, + gop_map->ref); + + xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. */ if (err) continue; - /* First error: invalidate header and preceding fragments. */ - pending_idx = *((u16 *)skb->data); - xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); - for (j = start; j < i; j++) { + /* First error: if the header haven't shared a slot with the + * first frag, release it as well. + */ + if (!sharedslot) + xenvif_idx_release(queue, + XENVIF_TX_CB(skb)->pending_idx, + XEN_NETIF_RSP_OKAY); + + /* Invalidate preceding fragments of this skb. */ + for (j = 0; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); - xenvif_idx_release(vif, pending_idx, + xenvif_idx_unmap(queue, pending_idx); + xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY); } + /* And if we found the error while checking the frag_list, unmap + * the first skb's frags + */ + if (first_shinfo) { + for (j = 0; j < first_shinfo->nr_frags; j++) { + pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); + xenvif_idx_unmap(queue, pending_idx); + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } + } + /* Remember the error: invalidate all subsequent fragments. 
*/ err = newerr; } - *gopp = gop + 1; + if (skb_has_frag_list(skb) && !first_shinfo) { + first_shinfo = skb_shinfo(skb); + shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); + nr_frags = shinfo->nr_frags; + + goto check_frags; + } + + *gopp_map = gop_map; return err; } -static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) +static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; int i; + u16 prev_pending_idx = INVALID_PENDING_IDX; for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = shinfo->frags + i; @@ -1072,46 +1163,62 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb) pending_idx = frag_get_pending_idx(frag); - txp = &vif->pending_tx_info[pending_idx].req; - page = virt_to_page(idx_to_kaddr(vif, pending_idx)); + /* If this is not the first frag, chain it to the previous*/ + if (prev_pending_idx == INVALID_PENDING_IDX) + skb_shinfo(skb)->destructor_arg = + &callback_param(queue, pending_idx); + else + callback_param(queue, prev_pending_idx).ctx = + &callback_param(queue, pending_idx); + + callback_param(queue, pending_idx).ctx = NULL; + prev_pending_idx = pending_idx; + + txp = &queue->pending_tx_info[pending_idx].req; + page = virt_to_page(idx_to_kaddr(queue, pending_idx)); __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); skb->len += txp->size; skb->data_len += txp->size; skb->truesize += txp->size; - /* Take an extra reference to offset xenvif_idx_release */ - get_page(vif->mmap_pages[pending_idx]); - xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY); + /* Take an extra reference to offset network stack's put_page */ + get_page(queue->mmap_pages[pending_idx]); } + /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc + * overlaps with "index", and "mapping" is not set. I think mapping + * should be set. If delivered to local stack, it would drop this + * skb in sk_filter unless the socket has the right to use it. + */ + skb->pfmemalloc = false; } -static int xenvif_get_extras(struct xenvif *vif, +static int xenvif_get_extras(struct xenvif_queue *queue, struct xen_netif_extra_info *extras, int work_to_do) { struct xen_netif_extra_info extra; - RING_IDX cons = vif->tx.req_cons; + RING_IDX cons = queue->tx.req_cons; do { if (unlikely(work_to_do-- <= 0)) { - netdev_err(vif->dev, "Missing extra info\n"); - xenvif_fatal_tx_err(vif); + netdev_err(queue->vif->dev, "Missing extra info\n"); + xenvif_fatal_tx_err(queue->vif); return -EBADR; } - memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), + memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), sizeof(extra)); if (unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { - vif->tx.req_cons = ++cons; - netdev_err(vif->dev, + queue->tx.req_cons = ++cons; + netdev_err(queue->vif->dev, "Invalid extra type: %d\n", extra.type); - xenvif_fatal_tx_err(vif); + xenvif_fatal_tx_err(queue->vif); return -EINVAL; } memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); - vif->tx.req_cons = ++cons; + queue->tx.req_cons = ++cons; } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); return work_to_do; @@ -1141,265 +1248,14 @@ static int xenvif_set_skb_gso(struct xenvif *vif, } skb_shinfo(skb)->gso_size = gso->u.gso.size; - - /* Header must be checked, and gso_segs computed. 
*/ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; - - return 0; -} - -static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len, - unsigned int max) -{ - if (skb_headlen(skb) >= len) - return 0; - - /* If we need to pullup then pullup to the max, so we - * won't need to do it again. - */ - if (max > skb->len) - max = skb->len; - - if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) - return -ENOMEM; - - if (skb_headlen(skb) < len) - return -EPROTO; + /* gso_segs will be calculated later */ return 0; } -/* This value should be large enough to cover a tagged ethernet header plus - * maximally sized IP and TCP or UDP headers. - */ -#define MAX_IP_HDR_LEN 128 - -static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, - int recalculate_partial_csum) -{ - unsigned int off; - bool fragment; - int err; - - fragment = false; - - err = maybe_pull_tail(skb, - sizeof(struct iphdr), - MAX_IP_HDR_LEN); - if (err < 0) - goto out; - - if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) - fragment = true; - - off = ip_hdrlen(skb); - - err = -EPROTO; - - if (fragment) - goto out; - - switch (ip_hdr(skb)->protocol) { - case IPPROTO_TCP: - err = maybe_pull_tail(skb, - off + sizeof(struct tcphdr), - MAX_IP_HDR_LEN); - if (err < 0) - goto out; - - if (!skb_partial_csum_set(skb, off, - offsetof(struct tcphdr, check))) { - err = -EPROTO; - goto out; - } - - if (recalculate_partial_csum) - tcp_hdr(skb)->check = - ~csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, - skb->len - off, - IPPROTO_TCP, 0); - break; - case IPPROTO_UDP: - err = maybe_pull_tail(skb, - off + sizeof(struct udphdr), - MAX_IP_HDR_LEN); - if (err < 0) - goto out; - - if (!skb_partial_csum_set(skb, off, - offsetof(struct udphdr, check))) { - err = -EPROTO; - goto out; - } - - if (recalculate_partial_csum) - udp_hdr(skb)->check = - ~csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, - skb->len - off, - IPPROTO_UDP, 0); - break; - default: - goto out; - } - - err = 0; - -out: - return err; -} - -/* This value should be large enough to cover a tagged ethernet header plus - * an IPv6 header, all options, and a maximal TCP or UDP header. 
- */ -#define MAX_IPV6_HDR_LEN 256 - -#define OPT_HDR(type, skb, off) \ - (type *)(skb_network_header(skb) + (off)) - -static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, - int recalculate_partial_csum) -{ - int err; - u8 nexthdr; - unsigned int off; - unsigned int len; - bool fragment; - bool done; - - fragment = false; - done = false; - - off = sizeof(struct ipv6hdr); - - err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); - if (err < 0) - goto out; - - nexthdr = ipv6_hdr(skb)->nexthdr; - - len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); - while (off <= len && !done) { - switch (nexthdr) { - case IPPROTO_DSTOPTS: - case IPPROTO_HOPOPTS: - case IPPROTO_ROUTING: { - struct ipv6_opt_hdr *hp; - - err = maybe_pull_tail(skb, - off + - sizeof(struct ipv6_opt_hdr), - MAX_IPV6_HDR_LEN); - if (err < 0) - goto out; - - hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); - nexthdr = hp->nexthdr; - off += ipv6_optlen(hp); - break; - } - case IPPROTO_AH: { - struct ip_auth_hdr *hp; - - err = maybe_pull_tail(skb, - off + - sizeof(struct ip_auth_hdr), - MAX_IPV6_HDR_LEN); - if (err < 0) - goto out; - - hp = OPT_HDR(struct ip_auth_hdr, skb, off); - nexthdr = hp->nexthdr; - off += ipv6_authlen(hp); - break; - } - case IPPROTO_FRAGMENT: { - struct frag_hdr *hp; - - err = maybe_pull_tail(skb, - off + - sizeof(struct frag_hdr), - MAX_IPV6_HDR_LEN); - if (err < 0) - goto out; - - hp = OPT_HDR(struct frag_hdr, skb, off); - - if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) - fragment = true; - - nexthdr = hp->nexthdr; - off += sizeof(struct frag_hdr); - break; - } - default: - done = true; - break; - } - } - - err = -EPROTO; - - if (!done || fragment) - goto out; - - switch (nexthdr) { - case IPPROTO_TCP: - err = maybe_pull_tail(skb, - off + sizeof(struct tcphdr), - MAX_IPV6_HDR_LEN); - if (err < 0) - goto out; - - if (!skb_partial_csum_set(skb, off, - offsetof(struct tcphdr, check))) { - err = -EPROTO; - goto out; - } - - if (recalculate_partial_csum) - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - skb->len - off, - IPPROTO_TCP, 0); - break; - case IPPROTO_UDP: - err = maybe_pull_tail(skb, - off + sizeof(struct udphdr), - MAX_IPV6_HDR_LEN); - if (err < 0) - goto out; - - if (!skb_partial_csum_set(skb, off, - offsetof(struct udphdr, check))) { - err = -EPROTO; - goto out; - } - - if (recalculate_partial_csum) - udp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - skb->len - off, - IPPROTO_UDP, 0); - break; - default: - goto out; - } - - err = 0; - -out: - return err; -} - -static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) +static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb) { - int err = -EPROTO; - int recalculate_partial_csum = 0; + bool recalculate_partial_csum = false; /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy * peers can fail to set NETRXF_csum_blank when sending a GSO @@ -1407,48 +1263,43 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) * recalculate the partial checksum. */ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { - vif->rx_gso_checksum_fixup++; + queue->stats.rx_gso_checksum_fixup++; skb->ip_summed = CHECKSUM_PARTIAL; - recalculate_partial_csum = 1; + recalculate_partial_csum = true; } /* A non-CHECKSUM_PARTIAL SKB does not require setup. 
*/ if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; - if (skb->protocol == htons(ETH_P_IP)) - err = checksum_setup_ip(vif, skb, recalculate_partial_csum); - else if (skb->protocol == htons(ETH_P_IPV6)) - err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum); - - return err; + return skb_checksum_setup(skb, recalculate_partial_csum); } -static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) +static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) { u64 now = get_jiffies_64(); - u64 next_credit = vif->credit_window_start + - msecs_to_jiffies(vif->credit_usec / 1000); + u64 next_credit = queue->credit_window_start + + msecs_to_jiffies(queue->credit_usec / 1000); /* Timer could already be pending in rare cases. */ - if (timer_pending(&vif->credit_timeout)) + if (timer_pending(&queue->credit_timeout)) return true; /* Passed the point where we can replenish credit? */ if (time_after_eq64(now, next_credit)) { - vif->credit_window_start = now; - tx_add_credit(vif); + queue->credit_window_start = now; + tx_add_credit(queue); } /* Still too big to send right now? Set a callback. */ - if (size > vif->remaining_credit) { - vif->credit_timeout.data = - (unsigned long)vif; - vif->credit_timeout.function = + if (size > queue->remaining_credit) { + queue->credit_timeout.data = + (unsigned long)queue; + queue->credit_timeout.function = tx_credit_callback; - mod_timer(&vif->credit_timeout, + mod_timer(&queue->credit_timeout, next_credit); - vif->credit_window_start = next_credit; + queue->credit_window_start = next_credit; return true; } @@ -1456,18 +1307,18 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) return false; } -static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) +static void xenvif_tx_build_gops(struct xenvif_queue *queue, + int budget, + unsigned *copy_ops, + unsigned *map_ops) { - struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; + struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; struct sk_buff *skb; int ret; - while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS) && - (skb_queue_len(&vif->tx_queue) < budget)) { + while (skb_queue_len(&queue->tx_queue) < budget) { struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; - struct page *page; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; u16 pending_idx; RING_IDX idx; @@ -1475,190 +1326,254 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) unsigned int data_len; pending_ring_idx_t index; - if (vif->tx.sring->req_prod - vif->tx.req_cons > + if (queue->tx.sring->req_prod - queue->tx.req_cons > XEN_NETIF_TX_RING_SIZE) { - netdev_err(vif->dev, + netdev_err(queue->vif->dev, "Impossible number of requests. " "req_prod %d, req_cons %d, size %ld\n", - vif->tx.sring->req_prod, vif->tx.req_cons, + queue->tx.sring->req_prod, queue->tx.req_cons, XEN_NETIF_TX_RING_SIZE); - xenvif_fatal_tx_err(vif); - continue; + xenvif_fatal_tx_err(queue->vif); + break; } - work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); + work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); if (!work_to_do) break; - idx = vif->tx.req_cons; + idx = queue->tx.req_cons; rmb(); /* Ensure that we see the request before we copy it. */ - memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); + memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); /* Credit-based scheduling. 
*/ - if (txreq.size > vif->remaining_credit && - tx_credit_exceeded(vif, txreq.size)) + if (txreq.size > queue->remaining_credit && + tx_credit_exceeded(queue, txreq.size)) break; - vif->remaining_credit -= txreq.size; + queue->remaining_credit -= txreq.size; work_to_do--; - vif->tx.req_cons = ++idx; + queue->tx.req_cons = ++idx; memset(extras, 0, sizeof(extras)); if (txreq.flags & XEN_NETTXF_extra_info) { - work_to_do = xenvif_get_extras(vif, extras, + work_to_do = xenvif_get_extras(queue, extras, work_to_do); - idx = vif->tx.req_cons; + idx = queue->tx.req_cons; if (unlikely(work_to_do < 0)) break; } - ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do); + ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do); if (unlikely(ret < 0)) break; idx += ret; if (unlikely(txreq.size < ETH_HLEN)) { - netdev_dbg(vif->dev, + netdev_dbg(queue->vif->dev, "Bad packet size: %d\n", txreq.size); - xenvif_tx_err(vif, &txreq, idx); + xenvif_tx_err(queue, &txreq, idx); break; } /* No crossing a page as the payload mustn't fragment. */ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { - netdev_err(vif->dev, + netdev_err(queue->vif->dev, "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); - xenvif_fatal_tx_err(vif); + xenvif_fatal_tx_err(queue->vif); break; } - index = pending_index(vif->pending_cons); - pending_idx = vif->pending_ring[index]; + index = pending_index(queue->pending_cons); + pending_idx = queue->pending_ring[index]; data_len = (txreq.size > PKT_PROT_LEN && ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? PKT_PROT_LEN : txreq.size; - skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, - GFP_ATOMIC | __GFP_NOWARN); + skb = xenvif_alloc_skb(data_len); if (unlikely(skb == NULL)) { - netdev_dbg(vif->dev, + netdev_dbg(queue->vif->dev, "Can't allocate a skb in start_xmit.\n"); - xenvif_tx_err(vif, &txreq, idx); + xenvif_tx_err(queue, &txreq, idx); break; } - /* Packets passed to netif_rx() must have some headroom. */ - skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); - if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; - if (xenvif_set_skb_gso(vif, skb, gso)) { + if (xenvif_set_skb_gso(queue->vif, skb, gso)) { /* Failure in xenvif_set_skb_gso is fatal. 
*/ kfree_skb(skb); break; } } - /* XXX could copy straight to head */ - page = xenvif_alloc_page(vif, pending_idx); - if (!page) { - kfree_skb(skb); - xenvif_tx_err(vif, &txreq, idx); - break; - } - - gop->source.u.ref = txreq.gref; - gop->source.domid = vif->domid; - gop->source.offset = txreq.offset; - - gop->dest.u.gmfn = virt_to_mfn(page_address(page)); - gop->dest.domid = DOMID_SELF; - gop->dest.offset = txreq.offset; + XENVIF_TX_CB(skb)->pending_idx = pending_idx; - gop->len = txreq.size; - gop->flags = GNTCOPY_source_gref; + __skb_put(skb, data_len); + queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; + queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; + queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; - gop++; + queue->tx_copy_ops[*copy_ops].dest.u.gmfn = + virt_to_mfn(skb->data); + queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; + queue->tx_copy_ops[*copy_ops].dest.offset = + offset_in_page(skb->data); - memcpy(&vif->pending_tx_info[pending_idx].req, - &txreq, sizeof(txreq)); - vif->pending_tx_info[pending_idx].head = index; - *((u16 *)skb->data) = pending_idx; + queue->tx_copy_ops[*copy_ops].len = data_len; + queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; - __skb_put(skb, data_len); + (*copy_ops)++; skb_shinfo(skb)->nr_frags = ret; if (data_len < txreq.size) { skb_shinfo(skb)->nr_frags++; frag_set_pending_idx(&skb_shinfo(skb)->frags[0], pending_idx); + xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); + gop++; } else { frag_set_pending_idx(&skb_shinfo(skb)->frags[0], INVALID_PENDING_IDX); + memcpy(&queue->pending_tx_info[pending_idx].req, &txreq, + sizeof(txreq)); } - vif->pending_cons++; + queue->pending_cons++; - request_gop = xenvif_get_requests(vif, skb, txfrags, gop); + request_gop = xenvif_get_requests(queue, skb, txfrags, gop); if (request_gop == NULL) { kfree_skb(skb); - xenvif_tx_err(vif, &txreq, idx); + xenvif_tx_err(queue, &txreq, idx); break; } gop = request_gop; - __skb_queue_tail(&vif->tx_queue, skb); + __skb_queue_tail(&queue->tx_queue, skb); - vif->tx.req_cons = idx; + queue->tx.req_cons = idx; - if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops)) + if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || + (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) break; } - return gop - vif->tx_copy_ops; + (*map_ops) = gop - queue->tx_map_ops; + return; } +/* Consolidate skb with a frag_list into a brand new one with local pages on + * frags. Returns 0 or -ENOMEM if can't allocate new pages. 
+ */ +static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb) +{ + unsigned int offset = skb_headlen(skb); + skb_frag_t frags[MAX_SKB_FRAGS]; + int i; + struct ubuf_info *uarg; + struct sk_buff *nskb = skb_shinfo(skb)->frag_list; + + queue->stats.tx_zerocopy_sent += 2; + queue->stats.tx_frag_overflow++; + + xenvif_fill_frags(queue, nskb); + /* Subtract frags size, we will correct it later */ + skb->truesize -= skb->data_len; + skb->len += nskb->len; + skb->data_len += nskb->len; + + /* create a brand new frags array and coalesce there */ + for (i = 0; offset < skb->len; i++) { + struct page *page; + unsigned int len; -static int xenvif_tx_submit(struct xenvif *vif) + BUG_ON(i >= MAX_SKB_FRAGS); + page = alloc_page(GFP_ATOMIC|__GFP_COLD); + if (!page) { + int j; + skb->truesize += skb->data_len; + for (j = 0; j < i; j++) + put_page(frags[j].page.p); + return -ENOMEM; + } + + if (offset + PAGE_SIZE < skb->len) + len = PAGE_SIZE; + else + len = skb->len - offset; + if (skb_copy_bits(skb, offset, page_address(page), len)) + BUG(); + + offset += len; + frags[i].page.p = page; + frags[i].page_offset = 0; + skb_frag_size_set(&frags[i], len); + } + /* swap out with old one */ + memcpy(skb_shinfo(skb)->frags, + frags, + i * sizeof(skb_frag_t)); + skb_shinfo(skb)->nr_frags = i; + skb->truesize += i * PAGE_SIZE; + + /* remove traces of mapped pages and frag_list */ + skb_frag_list_init(skb); + uarg = skb_shinfo(skb)->destructor_arg; + uarg->callback(uarg, true); + skb_shinfo(skb)->destructor_arg = NULL; + + skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + kfree_skb(nskb); + + return 0; +} + +static int xenvif_tx_submit(struct xenvif_queue *queue) { - struct gnttab_copy *gop = vif->tx_copy_ops; + struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; + struct gnttab_copy *gop_copy = queue->tx_copy_ops; struct sk_buff *skb; int work_done = 0; - while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) { + while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; u16 pending_idx; unsigned data_len; - pending_idx = *((u16 *)skb->data); - txp = &vif->pending_tx_info[pending_idx].req; + pending_idx = XENVIF_TX_CB(skb)->pending_idx; + txp = &queue->pending_tx_info[pending_idx].req; /* Check the remap error code. */ - if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) { - netdev_dbg(vif->dev, "netback grant failed.\n"); + if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { + /* If there was an error, xenvif_tx_check_gop is + * expected to release all the frags which were mapped, + * so kfree_skb shouldn't do it again + */ skb_shinfo(skb)->nr_frags = 0; + if (skb_has_frag_list(skb)) { + struct sk_buff *nskb = + skb_shinfo(skb)->frag_list; + skb_shinfo(nskb)->nr_frags = 0; + } kfree_skb(skb); continue; } data_len = skb->len; - memcpy(skb->data, - (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset), - data_len); + callback_param(queue, pending_idx).ctx = NULL; if (data_len < txp->size) { /* Append the packet payload as a fragment. */ txp->offset += data_len; txp->size -= data_len; } else { /* Schedule a response immediately. 
*/ - xenvif_idx_release(vif, pending_idx, + xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY); } @@ -1667,136 +1582,253 @@ static int xenvif_tx_submit(struct xenvif *vif) else if (txp->flags & XEN_NETTXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; - xenvif_fill_frags(vif, skb); + xenvif_fill_frags(queue, skb); + + if (unlikely(skb_has_frag_list(skb))) { + if (xenvif_handle_frag_list(queue, skb)) { + if (net_ratelimit()) + netdev_err(queue->vif->dev, + "Not enough memory to consolidate frag_list!\n"); + skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + kfree_skb(skb); + continue; + } + } if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) { int target = min_t(int, skb->len, PKT_PROT_LEN); __pskb_pull_tail(skb, target - skb_headlen(skb)); } - skb->dev = vif->dev; + skb->dev = queue->vif->dev; skb->protocol = eth_type_trans(skb, skb->dev); skb_reset_network_header(skb); - if (checksum_setup(vif, skb)) { - netdev_dbg(vif->dev, + if (checksum_setup(queue, skb)) { + netdev_dbg(queue->vif->dev, "Can't setup checksum in net_tx_action\n"); + /* We have to set this flag to trigger the callback */ + if (skb_shinfo(skb)->destructor_arg) + skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; kfree_skb(skb); continue; } skb_probe_transport_header(skb, 0); - vif->dev->stats.rx_bytes += skb->len; - vif->dev->stats.rx_packets++; + /* If the packet is GSO then we will have just set up the + * transport header offset in checksum_setup so it's now + * straightforward to calculate gso_segs. + */ + if (skb_is_gso(skb)) { + int mss = skb_shinfo(skb)->gso_size; + int hdrlen = skb_transport_header(skb) - + skb_mac_header(skb) + + tcp_hdrlen(skb); + + skb_shinfo(skb)->gso_segs = + DIV_ROUND_UP(skb->len - hdrlen, mss); + } + + queue->stats.rx_bytes += skb->len; + queue->stats.rx_packets++; work_done++; + /* Set this flag right before netif_receive_skb, otherwise + * someone might think this packet already left netback, and + * do a skb_copy_ubufs while we are still in control of the + * skb. E.g. the __pskb_pull_tail earlier can do such thing. + */ + if (skb_shinfo(skb)->destructor_arg) { + skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + queue->stats.tx_zerocopy_sent++; + } + netif_receive_skb(skb); } return work_done; } -/* Called after netfront has transmitted */ -int xenvif_tx_action(struct xenvif *vif, int budget) +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success) { - unsigned nr_gops; - int work_done; - - if (unlikely(!tx_work_todo(vif))) - return 0; + unsigned long flags; + pending_ring_idx_t index; + struct xenvif_queue *queue = ubuf_to_queue(ubuf); - nr_gops = xenvif_tx_build_gops(vif, budget); - - if (nr_gops == 0) - return 0; - - gnttab_batch_copy(vif->tx_copy_ops, nr_gops); - - work_done = xenvif_tx_submit(vif); - - return work_done; + /* This is the only place where we grab this lock, to protect callbacks + * from each other. + */ + spin_lock_irqsave(&queue->callback_lock, flags); + do { + u16 pending_idx = ubuf->desc; + ubuf = (struct ubuf_info *) ubuf->ctx; + BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= + MAX_PENDING_REQS); + index = pending_index(queue->dealloc_prod); + queue->dealloc_ring[index] = pending_idx; + /* Sync with xenvif_tx_dealloc_action: + * insert idx then incr producer. 
+ */ + smp_wmb(); + queue->dealloc_prod++; + } while (ubuf); + wake_up(&queue->dealloc_wq); + spin_unlock_irqrestore(&queue->callback_lock, flags); + + if (likely(zerocopy_success)) + queue->stats.tx_zerocopy_success++; + else + queue->stats.tx_zerocopy_fail++; } -static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx, - u8 status) +static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) { - struct pending_tx_info *pending_tx_info; - pending_ring_idx_t head; - u16 peek; /* peek into next tx request */ + struct gnttab_unmap_grant_ref *gop; + pending_ring_idx_t dc, dp; + u16 pending_idx, pending_idx_release[MAX_PENDING_REQS]; + unsigned int i = 0; - BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL)); + dc = queue->dealloc_cons; + gop = queue->tx_unmap_ops; - /* Already complete? */ - if (vif->mmap_pages[pending_idx] == NULL) - return; + /* Free up any grants we have finished using */ + do { + dp = queue->dealloc_prod; - pending_tx_info = &vif->pending_tx_info[pending_idx]; + /* Ensure we see all indices enqueued by all + * xenvif_zerocopy_callback(). + */ + smp_rmb(); + + while (dc != dp) { + BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); + pending_idx = + queue->dealloc_ring[pending_index(dc++)]; + + pending_idx_release[gop-queue->tx_unmap_ops] = + pending_idx; + queue->pages_to_unmap[gop-queue->tx_unmap_ops] = + queue->mmap_pages[pending_idx]; + gnttab_set_unmap_op(gop, + idx_to_kaddr(queue, pending_idx), + GNTMAP_host_map, + queue->grant_tx_handle[pending_idx]); + xenvif_grant_handle_reset(queue, pending_idx); + ++gop; + } - head = pending_tx_info->head; + } while (dp != queue->dealloc_prod); + + queue->dealloc_cons = dc; + + if (gop - queue->tx_unmap_ops > 0) { + int ret; + ret = gnttab_unmap_refs(queue->tx_unmap_ops, + NULL, + queue->pages_to_unmap, + gop - queue->tx_unmap_ops); + if (ret) { + netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n", + gop - queue->tx_unmap_ops, ret); + for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { + if (gop[i].status != GNTST_okay) + netdev_err(queue->vif->dev, + " host_addr: %llx handle: %x status: %d\n", + gop[i].host_addr, + gop[i].handle, + gop[i].status); + } + BUG(); + } + } - BUG_ON(!pending_tx_is_head(vif, head)); - BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx); + for (i = 0; i < gop - queue->tx_unmap_ops; ++i) + xenvif_idx_release(queue, pending_idx_release[i], + XEN_NETIF_RSP_OKAY); +} - do { - pending_ring_idx_t index; - pending_ring_idx_t idx = pending_index(head); - u16 info_idx = vif->pending_ring[idx]; - pending_tx_info = &vif->pending_tx_info[info_idx]; - make_tx_response(vif, &pending_tx_info->req, status); +/* Called after netfront has transmitted */ +int xenvif_tx_action(struct xenvif_queue *queue, int budget) +{ + unsigned nr_mops, nr_cops = 0; + int work_done, ret; - /* Setting any number other than - * INVALID_PENDING_RING_IDX indicates this slot is - * starting a new packet / ending a previous packet. 
- */ - pending_tx_info->head = 0; + if (unlikely(!tx_work_todo(queue))) + return 0; + + xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops); + + if (nr_cops == 0) + return 0; - index = pending_index(vif->pending_prod++); - vif->pending_ring[index] = vif->pending_ring[info_idx]; + gnttab_batch_copy(queue->tx_copy_ops, nr_cops); + if (nr_mops != 0) { + ret = gnttab_map_refs(queue->tx_map_ops, + NULL, + queue->pages_to_map, + nr_mops); + BUG_ON(ret); + } - peek = vif->pending_ring[pending_index(++head)]; + work_done = xenvif_tx_submit(queue); - } while (!pending_tx_is_head(vif, peek)); + return work_done; +} - put_page(vif->mmap_pages[pending_idx]); - vif->mmap_pages[pending_idx] = NULL; +static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, + u8 status) +{ + struct pending_tx_info *pending_tx_info; + pending_ring_idx_t index; + unsigned long flags; + + pending_tx_info = &queue->pending_tx_info[pending_idx]; + spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, &pending_tx_info->req, status); + index = pending_index(queue->pending_prod); + queue->pending_ring[index] = pending_idx; + /* TX shouldn't use the index before we give it back here */ + mb(); + queue->pending_prod++; + spin_unlock_irqrestore(&queue->response_lock, flags); } -static void make_tx_response(struct xenvif *vif, +static void make_tx_response(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, s8 st) { - RING_IDX i = vif->tx.rsp_prod_pvt; + RING_IDX i = queue->tx.rsp_prod_pvt; struct xen_netif_tx_response *resp; int notify; - resp = RING_GET_RESPONSE(&vif->tx, i); + resp = RING_GET_RESPONSE(&queue->tx, i); resp->id = txp->id; resp->status = st; if (txp->flags & XEN_NETTXF_extra_info) - RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; + RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; - vif->tx.rsp_prod_pvt = ++i; - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); + queue->tx.rsp_prod_pvt = ++i; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) - notify_remote_via_irq(vif->tx_irq); + notify_remote_via_irq(queue->tx_irq); } -static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, +static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, u16 id, s8 st, u16 offset, u16 size, u16 flags) { - RING_IDX i = vif->rx.rsp_prod_pvt; + RING_IDX i = queue->rx.rsp_prod_pvt; struct xen_netif_rx_response *resp; - resp = RING_GET_RESPONSE(&vif->rx, i); + resp = RING_GET_RESPONSE(&queue->rx, i); resp->offset = offset; resp->flags = flags; resp->id = id; @@ -1804,38 +1836,67 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, if (st < 0) resp->status = (s16)st; - vif->rx.rsp_prod_pvt = ++i; + queue->rx.rsp_prod_pvt = ++i; return resp; } -static inline int rx_work_todo(struct xenvif *vif) +void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) { - return !skb_queue_empty(&vif->rx_queue); + int ret; + struct gnttab_unmap_grant_ref tx_unmap_op; + + gnttab_set_unmap_op(&tx_unmap_op, + idx_to_kaddr(queue, pending_idx), + GNTMAP_host_map, + queue->grant_tx_handle[pending_idx]); + xenvif_grant_handle_reset(queue, pending_idx); + + ret = gnttab_unmap_refs(&tx_unmap_op, NULL, + &queue->mmap_pages[pending_idx], 1); + if (ret) { + netdev_err(queue->vif->dev, + "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n", + ret, + pending_idx, + tx_unmap_op.host_addr, + tx_unmap_op.handle, + tx_unmap_op.status); + BUG(); + } } -static inline int 
tx_work_todo(struct xenvif *vif) +static inline int rx_work_todo(struct xenvif_queue *queue) { + return (!skb_queue_empty(&queue->rx_queue) && + xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) || + queue->rx_queue_purge; +} - if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) && - (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS)) +static inline int tx_work_todo(struct xenvif_queue *queue) +{ + if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) return 1; return 0; } -void xenvif_unmap_frontend_rings(struct xenvif *vif) +static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue) +{ + return queue->dealloc_cons != queue->dealloc_prod; +} + +void xenvif_unmap_frontend_rings(struct xenvif_queue *queue) { - if (vif->tx.sring) - xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), - vif->tx.sring); - if (vif->rx.sring) - xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), - vif->rx.sring); + if (queue->tx.sring) + xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), + queue->tx.sring); + if (queue->rx.sring) + xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), + queue->rx.sring); } -int xenvif_map_frontend_rings(struct xenvif *vif, +int xenvif_map_frontend_rings(struct xenvif_queue *queue, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref) { @@ -1845,48 +1906,102 @@ int xenvif_map_frontend_rings(struct xenvif *vif, int err = -ENOMEM; - err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), + err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), tx_ring_ref, &addr); if (err) goto err; txs = (struct xen_netif_tx_sring *)addr; - BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); + BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE); - err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), + err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), rx_ring_ref, &addr); if (err) goto err; rxs = (struct xen_netif_rx_sring *)addr; - BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); - - vif->rx_req_cons_peek = 0; + BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE); return 0; err: - xenvif_unmap_frontend_rings(vif); + xenvif_unmap_frontend_rings(queue); return err; } -int xenvif_kthread(void *data) +static void xenvif_start_queue(struct xenvif_queue *queue) { - struct xenvif *vif = data; + if (xenvif_schedulable(queue->vif)) + xenvif_wake_queue(queue); +} + +int xenvif_kthread_guest_rx(void *data) +{ + struct xenvif_queue *queue = data; + struct sk_buff *skb; while (!kthread_should_stop()) { - wait_event_interruptible(vif->wq, - rx_work_todo(vif) || + wait_event_interruptible(queue->wq, + rx_work_todo(queue) || + queue->vif->disabled || kthread_should_stop()); + + /* This frontend is found to be rogue, disable it in + * kthread context. Currently this is only set when + * netback finds out frontend sends malformed packet, + * but we cannot disable the interface in softirq + * context so we defer it here, if this thread is + * associated with queue 0. 
+ */ + if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0)) + xenvif_carrier_off(queue->vif); + if (kthread_should_stop()) break; - if (rx_work_todo(vif)) - xenvif_rx_action(vif); + if (queue->rx_queue_purge) { + skb_queue_purge(&queue->rx_queue); + queue->rx_queue_purge = false; + } + + if (!skb_queue_empty(&queue->rx_queue)) + xenvif_rx_action(queue); + + if (skb_queue_empty(&queue->rx_queue) && + xenvif_queue_stopped(queue)) { + del_timer_sync(&queue->wake_queue); + xenvif_start_queue(queue); + } cond_resched(); } + /* Bin any remaining skbs */ + while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) + dev_kfree_skb(skb); + + return 0; +} + +int xenvif_dealloc_kthread(void *data) +{ + struct xenvif_queue *queue = data; + + while (!kthread_should_stop()) { + wait_event_interruptible(queue->dealloc_wq, + tx_dealloc_work_todo(queue) || + kthread_should_stop()); + if (kthread_should_stop()) + break; + + xenvif_tx_dealloc_action(queue); + cond_resched(); + } + + /* Unmap anything remaining*/ + if (tx_dealloc_work_todo(queue)) + xenvif_tx_dealloc_action(queue); + return 0; } @@ -1897,6 +2012,9 @@ static int __init netback_init(void) if (!xen_domain()) return -ENODEV; + /* Allow as many queues as there are CPUs, by default */ + xenvif_max_queues = num_online_cpus(); + if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX); @@ -1907,6 +2025,8 @@ static int __init netback_init(void) if (rc) goto failed_init; + rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs); + return 0; failed_init: diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index f0358992b04..3d85acd84ba 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -15,11 +15,12 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "common.h" +#include <linux/vmalloc.h> +#include <linux/rtnetlink.h> struct backend_info { struct xenbus_device *dev; @@ -35,8 +36,9 @@ struct backend_info { u8 have_hotplug_status_watch:1; }; -static int connect_rings(struct backend_info *); -static void connect(struct backend_info *); +static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); +static void connect(struct backend_info *be); +static int read_xenbus_vif_flags(struct backend_info *be); static void backend_create_xenvif(struct backend_info *be); static void unregister_hotplug_status_watch(struct backend_info *be); static void set_backend_state(struct backend_info *be, @@ -158,6 +160,12 @@ static int netback_probe(struct xenbus_device *dev, if (err) pr_debug("Error writing feature-split-event-channels\n"); + /* Multi-queue support: This is an optional feature. 
*/ + err = xenbus_printf(XBT_NIL, dev->nodename, + "multi-queue-max-queues", "%u", xenvif_max_queues); + if (err) + pr_debug("Error writing multi-queue-max-queues\n"); + err = xenbus_switch_state(dev, XenbusStateInitWait); if (err) goto fail; @@ -486,10 +494,26 @@ static void connect(struct backend_info *be) { int err; struct xenbus_device *dev = be->dev; + unsigned long credit_bytes, credit_usec; + unsigned int queue_index; + unsigned int requested_num_queues; + struct xenvif_queue *queue; - err = connect_rings(be); - if (err) + /* Check whether the frontend requested multiple queues + * and read the number requested. + */ + err = xenbus_scanf(XBT_NIL, dev->otherend, + "multi-queue-num-queues", + "%u", &requested_num_queues); + if (err < 0) { + requested_num_queues = 1; /* Fall back to single queue */ + } else if (requested_num_queues > xenvif_max_queues) { + /* buggy or malicious guest */ + xenbus_dev_fatal(dev, err, + "guest requested %u queues, exceeding the maximum of %u.", + requested_num_queues, xenvif_max_queues); return; + } err = xen_net_read_mac(dev, be->vif->fe_dev_addr); if (err) { @@ -497,9 +521,56 @@ static void connect(struct backend_info *be) return; } - xen_net_read_rate(dev, &be->vif->credit_bytes, - &be->vif->credit_usec); - be->vif->remaining_credit = be->vif->credit_bytes; + xen_net_read_rate(dev, &credit_bytes, &credit_usec); + read_xenbus_vif_flags(be); + + /* Use the number of queues requested by the frontend */ + be->vif->queues = vzalloc(requested_num_queues * + sizeof(struct xenvif_queue)); + be->vif->num_queues = requested_num_queues; + + for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { + queue = &be->vif->queues[queue_index]; + queue->vif = be->vif; + queue->id = queue_index; + snprintf(queue->name, sizeof(queue->name), "%s-q%u", + be->vif->dev->name, queue->id); + + err = xenvif_init_queue(queue); + if (err) { + /* xenvif_init_queue() cleans up after itself on + * failure, but we need to clean up any previously + * initialised queues. Set num_queues to i so that + * earlier queues can be destroyed using the regular + * disconnect logic. + */ + be->vif->num_queues = queue_index; + goto err; + } + + queue->remaining_credit = credit_bytes; + + err = connect_rings(be, queue); + if (err) { + /* connect_rings() cleans up after itself on failure, + * but we need to clean up after xenvif_init_queue() here, + * and also clean up any previously initialised queues. + */ + xenvif_deinit_queue(queue); + be->vif->num_queues = queue_index; + goto err; + } + } + + /* Initialisation completed, tell core driver the number of + * active queues. 
+ */ + rtnl_lock(); + netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues); + netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues); + rtnl_unlock(); + + xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, @@ -508,45 +579,107 @@ static void connect(struct backend_info *be) if (!err) be->have_hotplug_status_watch = 1; - netif_wake_queue(be->vif->dev); + netif_tx_wake_all_queues(be->vif->dev); + + return; + +err: + if (be->vif->num_queues > 0) + xenvif_disconnect(be->vif); /* Clean up existing queues */ + vfree(be->vif->queues); + be->vif->queues = NULL; + be->vif->num_queues = 0; + return; } -static int connect_rings(struct backend_info *be) +static int connect_rings(struct backend_info *be, struct xenvif_queue *queue) { - struct xenvif *vif = be->vif; struct xenbus_device *dev = be->dev; + unsigned int num_queues = queue->vif->num_queues; unsigned long tx_ring_ref, rx_ring_ref; - unsigned int tx_evtchn, rx_evtchn, rx_copy; + unsigned int tx_evtchn, rx_evtchn; int err; - int val; + char *xspath; + size_t xspathsize; + const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */ + + /* If the frontend requested 1 queue, or we have fallen back + * to single queue due to lack of frontend support for multi- + * queue, expect the remaining XenStore keys in the toplevel + * directory. Otherwise, expect them in a subdirectory called + * queue-N. + */ + if (num_queues == 1) { + xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL); + if (!xspath) { + xenbus_dev_fatal(dev, -ENOMEM, + "reading ring references"); + return -ENOMEM; + } + strcpy(xspath, dev->otherend); + } else { + xspathsize = strlen(dev->otherend) + xenstore_path_ext_size; + xspath = kzalloc(xspathsize, GFP_KERNEL); + if (!xspath) { + xenbus_dev_fatal(dev, -ENOMEM, + "reading ring references"); + return -ENOMEM; + } + snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, + queue->id); + } - err = xenbus_gather(XBT_NIL, dev->otherend, + err = xenbus_gather(XBT_NIL, xspath, "tx-ring-ref", "%lu", &tx_ring_ref, "rx-ring-ref", "%lu", &rx_ring_ref, NULL); if (err) { xenbus_dev_fatal(dev, err, "reading %s/ring-ref", - dev->otherend); - return err; + xspath); + goto err; } /* Try split event channels first, then single event channel. */ - err = xenbus_gather(XBT_NIL, dev->otherend, + err = xenbus_gather(XBT_NIL, xspath, "event-channel-tx", "%u", &tx_evtchn, "event-channel-rx", "%u", &rx_evtchn, NULL); if (err < 0) { - err = xenbus_scanf(XBT_NIL, dev->otherend, + err = xenbus_scanf(XBT_NIL, xspath, "event-channel", "%u", &tx_evtchn); if (err < 0) { xenbus_dev_fatal(dev, err, "reading %s/event-channel(-tx/rx)", - dev->otherend); - return err; + xspath); + goto err; } rx_evtchn = tx_evtchn; } + /* Map the shared frame, irq etc. 
*/
+	err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
+			     tx_evtchn, rx_evtchn);
+	if (err) {
+		xenbus_dev_fatal(dev, err,
+				 "mapping shared-frames %lu/%lu port tx %u rx %u",
+				 tx_ring_ref, rx_ring_ref,
+				 tx_evtchn, rx_evtchn);
+		goto err;
+	}
+
+	err = 0;
+err: /* Regular return falls through with err == 0 */
+	kfree(xspath);
+	return err;
+}
+
+static int read_xenbus_vif_flags(struct backend_info *be)
+{
+	struct xenvif *vif = be->vif;
+	struct xenbus_device *dev = be->dev;
+	unsigned int rx_copy;
+	int err, val;
+
 	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
 			   &rx_copy);
 	if (err == -ENOENT) {
@@ -622,16 +755,6 @@ static int connect_rings(struct backend_info *be)
 		val = 0;
 	vif->ipv6_csum = !!val;
 
-	/* Map the shared frame, irq etc. */
-	err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
-			     tx_evtchn, rx_evtchn);
-	if (err) {
-		xenbus_dev_fatal(dev, err,
-				 "mapping shared-frames %lu/%lu port tx %u rx %u",
-				 tx_ring_ref, rx_ring_ref,
-				 tx_evtchn, rx_evtchn);
-		return err;
-	}
 	return 0;
 }
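Several of the mechanisms above are easier to see in isolation. The credit scheduler in tx_add_credit()/tx_credit_exceeded() gives each queue a byte budget replenished once per credit window: the burst allowance is the larger of credit_bytes and the next request's size capped at 128kB, and the replenish saturates instead of wrapping. A minimal userspace model of just that arithmetic; the struct and the 64kB/9000-byte values are illustrative, not the kernel's defaults.

#include <stdio.h>

struct credit_queue {
	unsigned long credit_bytes;	/* bytes granted per window */
	unsigned long remaining_credit;	/* budget left in this window */
};

/* Models tx_add_credit(): burst is max(credit_bytes, min(next request,
 * 128kB)), and the addition is clamped if it wraps. */
static void add_credit(struct credit_queue *q, unsigned long next_pkt_size)
{
	unsigned long max_burst, max_credit;

	max_burst = next_pkt_size < 131072UL ? next_pkt_size : 131072UL;
	if (max_burst < q->credit_bytes)
		max_burst = q->credit_bytes;

	max_credit = q->remaining_credit + q->credit_bytes;
	if (max_credit < q->remaining_credit)	/* wrapped: clamp */
		max_credit = (unsigned long)-1;

	q->remaining_credit = max_credit < max_burst ? max_credit : max_burst;
}

int main(void)
{
	struct credit_queue q = { .credit_bytes = 65536, .remaining_credit = 0 };

	add_credit(&q, 9000);			/* window elapsed: replenish */
	printf("9000B fits: %d\n", 9000 <= q.remaining_credit);	  /* 1 */
	q.remaining_credit -= 9000;
	printf("60000B fits: %d\n", 60000 <= q.remaining_credit); /* 0: defer */
	return 0;
}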
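xenvif_fill_frags() threads the mapped slots of an skb into a singly linked list: skb_shinfo(skb)->destructor_arg points at the first slot's callback_struct, each ctx points at the next slot's, and the last ctx stays NULL. xenvif_zerocopy_callback() later walks exactly this chain (ubuf = ubuf->ctx) to queue every pending_idx for deallocation. A reduced model with the struct trimmed to the two fields the walk uses:

#include <stdio.h>

struct ubuf_model {
	unsigned int desc;	/* pending_idx of this slot */
	struct ubuf_model *ctx;	/* next mapped slot, or NULL */
};

int main(void)
{
	struct ubuf_model slots[3] = {
		{ .desc = 11 }, { .desc = 42 }, { .desc = 7 },
	};
	struct ubuf_model *head = NULL, *prev = NULL;
	int i;

	/* Build, as xenvif_fill_frags() does: the first slot becomes
	 * destructor_arg, later slots hang off the previous slot's ctx. */
	for (i = 0; i < 3; i++) {
		if (!prev)
			head = &slots[i];
		else
			prev->ctx = &slots[i];
		slots[i].ctx = NULL;
		prev = &slots[i];
	}

	/* Walk, as xenvif_zerocopy_callback() does. */
	for (struct ubuf_model *u = head; u; u = u->ctx)
		printf("queue pending_idx %u for dealloc\n", u->desc);
	return 0;
}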
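The rebuilt TX path no longer grant-copies whole slots into backend-allocated pages. xenvif_tx_build_gops() queues one grant copy for at most the first PKT_PROT_LEN bytes of a packet (into the skb's linear area) and, when the first request is larger, a grant map for the remainder, which xenvif_tx_create_map_op() turns into frag 0. A sketch of the split decision alone; PKT_PROT_LEN is defined outside these hunks, so the 128 below is an assumed stand-in, while 18 is XEN_NETIF_NR_SLOTS_MIN.

#include <stdbool.h>
#include <stdio.h>

#define XEN_NETBK_LEGACY_SLOTS_MAX 18	/* XEN_NETIF_NR_SLOTS_MIN */
#define PKT_PROT_LEN 128		/* assumed value, for illustration */

struct tx_split {
	unsigned int copy_len;	/* grant-copied into skb->data */
	bool map_rest;		/* remainder grant-mapped as frag 0 */
};

/* Mirrors the data_len computation in xenvif_tx_build_gops(). */
static struct tx_split split_first_slot(unsigned int req_size, int nr_slots)
{
	struct tx_split s;

	s.copy_len = (req_size > PKT_PROT_LEN &&
		      nr_slots < XEN_NETBK_LEGACY_SLOTS_MAX) ?
		     PKT_PROT_LEN : req_size;
	s.map_rest = s.copy_len < req_size;
	return s;
}

int main(void)
{
	struct tx_split s = split_first_slot(1514, 1);
	printf("copy %u bytes, map rest: %d\n", s.copy_len, s.map_rest);
	s = split_first_slot(64, 1);
	printf("copy %u bytes, map rest: %d\n", s.copy_len, s.map_rest);
	return 0;
}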
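xenvif_handle_frag_list() copies a grant-backed skb into freshly allocated local pages one PAGE_SIZE chunk at a time, starting after the linear area, so every grant can be released before the packet enters the stack. The chunking in isolation, assuming 4096-byte pages and made-up lengths:

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	unsigned int skb_len = 10000;	/* total bytes to coalesce */
	unsigned int offset = 128;	/* skb_headlen(): already linear */
	int i;

	/* Same loop shape as xenvif_handle_frag_list(): full pages until
	 * the tail, then whatever is left. */
	for (i = 0; offset < skb_len; i++) {
		unsigned int len;

		if (offset + PAGE_SIZE < skb_len)
			len = PAGE_SIZE;
		else
			len = skb_len - offset;
		printf("frag %d: copy %u bytes at offset %u\n", i, len, offset);
		offset += len;
	}
	printf("nr_frags = %d\n", i);	/* 3 for these values */
	return 0;
}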
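Deferring gso_segs until after checksum_setup() works because skb_checksum_setup() leaves the transport header offset valid, so the full header length is known; the segment count is then just the TCP payload divided by the MSS, rounded up, as xenvif_tx_submit() computes. A worked example with made-up but typical sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* 14-byte Ethernet + 20-byte IPv4 + 20-byte TCP headers on a
	 * 4434-byte frame with MSS 1460: 4380 payload bytes -> 3 segments. */
	int skb_len = 4434;
	int hdrlen = 14 + 20 + 20;	/* transport header end - MAC header */
	int mss = 1460;

	printf("gso_segs = %d\n", DIV_ROUND_UP(skb_len - hdrlen, mss));
	return 0;
}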
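The zerocopy callback and xenvif_dealloc_kthread() hand off work through dealloc_ring: the producer writes a pending_idx, issues smp_wmb(), then increments dealloc_prod; the consumer reads dealloc_prod, issues smp_rmb(), then drains up to it. Below is a single-producer userspace model using C11 release/acquire ordering in place of those barriers; the real callback additionally serializes concurrent producers with callback_lock, and the ring size of 256 assumes MAX_PENDING_REQS with 4K pages.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256	/* power of two, like MAX_PENDING_REQS */

static uint16_t dealloc_ring[RING_SIZE];
static atomic_uint dealloc_prod;	/* bumped by the callback side */
static unsigned int dealloc_cons;	/* private to the dealloc thread */

static unsigned int ring_index(unsigned int i)
{
	return i & (RING_SIZE - 1);	/* like pending_index() */
}

/* Producer: publish the slot, then the index (release ~ smp_wmb). */
static void dealloc_push(uint16_t pending_idx)
{
	unsigned int p = atomic_load_explicit(&dealloc_prod,
					      memory_order_relaxed);
	dealloc_ring[ring_index(p)] = pending_idx;
	atomic_store_explicit(&dealloc_prod, p + 1, memory_order_release);
}

/* Consumer: read the index, then the slots (acquire ~ smp_rmb). */
static void dealloc_drain(void)
{
	unsigned int p = atomic_load_explicit(&dealloc_prod,
					      memory_order_acquire);
	while (dealloc_cons != p) {
		uint16_t idx = dealloc_ring[ring_index(dealloc_cons++)];
		printf("unmap grant for pending_idx %u\n", idx);
	}
}

int main(void)
{
	dealloc_push(3);
	dealloc_push(7);
	dealloc_drain();
	return 0;
}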
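On the xenbus side, a frontend that negotiated more than one queue publishes each queue's ring references and event channels under a queue-N subdirectory of its device path; a single queue keeps the flat legacy layout, which is why connect_rings() builds xspath two ways. The same selection in standalone form, with a made-up otherend path:

#include <stdio.h>

static void queue_xspath(char *buf, size_t len, const char *otherend,
			 unsigned int num_queues, unsigned int queue_id)
{
	if (num_queues == 1)
		snprintf(buf, len, "%s", otherend);	/* legacy flat layout */
	else
		snprintf(buf, len, "%s/queue-%u", otherend, queue_id);
}

int main(void)
{
	char path[128];

	queue_xspath(path, sizeof(path), "/local/domain/5/device/vif/0", 1, 0);
	printf("%s\n", path);	/* flat: keys at the toplevel */
	queue_xspath(path, sizeof(path), "/local/domain/5/device/vif/0", 4, 2);
	printf("%s\n", path);	/* .../queue-2 */
	return 0;
}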
