Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r-- | drivers/net/xen-netback/netback.c | 159
1 file changed, 59 insertions(+), 100 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fd00f25d985..682633bfe00 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -60,6 +60,9 @@ struct netbk_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
 #define MAX_BUFFER_OFFSET PAGE_SIZE
 
 /* extra field used in struct page */
@@ -155,13 +158,13 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 					     u16      flags);
 
 static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
-				       unsigned int idx)
+				       u16 idx)
 {
 	return page_to_pfn(netbk->mmap_pages[idx]);
 }
 
 static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
-					 unsigned int idx)
+					 u16 idx)
 {
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
 }
@@ -215,6 +218,16 @@ static int get_page_ext(struct page *pg,
 		 sizeof(struct iphdr) + MAX_IPOPTLEN + \
 		 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
 
+static u16 frag_get_pending_idx(skb_frag_t *frag)
+{
+	return (u16)frag->page_offset;
+}
+
+static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
+{
+	frag->page_offset = pending_idx;
+}
+
 static inline pending_ring_idx_t pending_index(unsigned i)
 {
 	return i & (MAX_PENDING_REQS-1);
@@ -312,8 +325,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 	unsigned int count;
 	int i, copy_off;
 
-	count = DIV_ROUND_UP(
-			offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
+	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
 
 	copy_off = skb_headlen(skb) % PAGE_SIZE;
 
@@ -321,7 +333,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 		count++;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		unsigned long size = skb_shinfo(skb)->frags[i].size;
+		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		unsigned long bytes;
 		while (size > 0) {
 			BUG_ON(copy_off > MAX_BUFFER_OFFSET);
@@ -382,7 +394,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	struct gnttab_copy *copy_gop;
 	struct netbk_rx_meta *meta;
 	/*
-	 * These variables a used iff get_page_ext returns true,
+	 * These variables are used iff get_page_ext returns true,
 	 * in which case they are guaranteed to be initialized.
 	 */
 	unsigned int uninitialized_var(group), uninitialized_var(idx);
@@ -512,8 +524,8 @@ static int netbk_gop_skb(struct sk_buff *skb,
 
 	for (i = 0; i < nr_frags; i++) {
 		netbk_gop_frag_copy(vif, skb, npo,
-				    skb_shinfo(skb)->frags[i].page,
-				    skb_shinfo(skb)->frags[i].size,
+				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
+				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				    skb_shinfo(skb)->frags[i].page_offset,
 				    &head);
 	}
@@ -890,7 +902,7 @@ static int netbk_count_requests(struct xenvif *vif,
 
 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
 					 struct sk_buff *skb,
-					 unsigned long pending_idx)
+					 u16 pending_idx)
 {
 	struct page *page;
 	page = alloc_page(GFP_KERNEL|__GFP_COLD);
@@ -909,11 +921,11 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	unsigned long pending_idx = *((u16 *)skb->data);
+	u16 pending_idx = *((u16 *)skb->data);
 	int i, start;
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < shinfo->nr_frags; i++, txp++) {
 		struct page *page;
@@ -927,8 +939,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		if (!page)
 			return NULL;
 
-		netbk->mmap_pages[pending_idx] = page;
-
 		gop->source.u.ref = txp->gref;
 		gop->source.domid = vif->domid;
 		gop->source.offset = txp->offset;
@@ -945,7 +955,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
 		xenvif_get(vif);
 		pending_tx_info[pending_idx].vif = vif;
-		frags[i].page = (void *)pending_idx;
+		frag_set_pending_idx(&frags[i], pending_idx);
 	}
 
 	return gop;
@@ -956,7 +966,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 				  struct gnttab_copy **gopp)
 {
 	struct gnttab_copy *gop = *gopp;
-	int pending_idx = *((u16 *)skb->data);
+	u16 pending_idx = *((u16 *)skb->data);
 	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
 	struct xenvif *vif = pending_tx_info[pending_idx].vif;
 	struct xen_netif_tx_request *txp;
@@ -976,13 +986,13 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 	}
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
 		pending_ring_idx_t index;
 
-		pending_idx = (unsigned long)shinfo->frags[i].page;
+		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
 		/* Check error status: if okay then remember grant handle. */
 		newerr = (++gop)->status;
@@ -1008,7 +1018,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 		pending_idx = *((u16 *)skb->data);
 		xen_netbk_idx_release(netbk, pending_idx);
 		for (j = start; j < i; j++) {
-			pending_idx = (unsigned long)shinfo->frags[i].page;
+			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
 			xen_netbk_idx_release(netbk, pending_idx);
 		}
 
@@ -1029,15 +1039,14 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 	for (i = 0; i < nr_frags; i++) {
 		skb_frag_t *frag = shinfo->frags + i;
 		struct xen_netif_tx_request *txp;
-		unsigned long pending_idx;
+		struct page *page;
+		u16 pending_idx;
 
-		pending_idx = (unsigned long)frag->page;
+		pending_idx = frag_get_pending_idx(frag);
 
 		txp = &netbk->pending_tx_info[pending_idx].req;
-		frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
-		frag->size = txp->size;
-		frag->page_offset = txp->offset;
-
+		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
 		skb->len += txp->size;
 		skb->data_len += txp->size;
 		skb->truesize += txp->size;
@@ -1324,8 +1333,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			continue;
 		}
 
-		netbk->mmap_pages[pending_idx] = page;
-
 		gop->source.u.ref = txreq.gref;
 		gop->source.domid = vif->domid;
 		gop->source.offset = txreq.offset;
@@ -1349,15 +1356,13 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		skb_shinfo(skb)->nr_frags = ret;
 		if (data_len < txreq.size) {
 			skb_shinfo(skb)->nr_frags++;
-			skb_shinfo(skb)->frags[0].page =
-				(void *)(unsigned long)pending_idx;
+			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+					     pending_idx);
 		} else {
-			/* Discriminate from any valid pending_idx value. */
-			skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+					     INVALID_PENDING_IDX);
 		}
 
-		__skb_queue_tail(&netbk->tx_queue, skb);
-
 		netbk->pending_cons++;
 
 		request_gop = xen_netbk_get_requests(netbk, vif,
@@ -1369,6 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		}
 		gop = request_gop;
 
+		__skb_queue_tail(&netbk->tx_queue, skb);
+
 		vif->tx.req_cons = idx;
 		xen_netbk_check_rx_xenvif(vif);
 
@@ -1577,88 +1584,42 @@ static int xen_netbk_kthread(void *data)
 }
 
 void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
 {
-	struct gnttab_unmap_grant_ref op;
-
-	if (vif->tx.sring) {
-		gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
-				    GNTMAP_host_map, vif->tx_shmem_handle);
-
-		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-			BUG();
-	}
-
-	if (vif->rx.sring) {
-		gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
-				    GNTMAP_host_map, vif->rx_shmem_handle);
-
-		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-			BUG();
-	}
-	if (vif->rx_comms_area)
-		free_vm_area(vif->rx_comms_area);
-	if (vif->tx_comms_area)
-		free_vm_area(vif->tx_comms_area);
+	if (vif->tx.sring)
+		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+					vif->tx.sring);
+	if (vif->rx.sring)
+		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+					vif->rx.sring);
 }
 
 int xen_netbk_map_frontend_rings(struct xenvif *vif,
 				 grant_ref_t tx_ring_ref,
 				 grant_ref_t rx_ring_ref)
 {
-	struct gnttab_map_grant_ref op;
+	void *addr;
 	struct xen_netif_tx_sring *txs;
 	struct xen_netif_rx_sring *rxs;
 
 	int err = -ENOMEM;
 
-	vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
-	if (vif->tx_comms_area == NULL)
-		goto err;
-
-	vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
-	if (vif->rx_comms_area == NULL)
+	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+				     tx_ring_ref, &addr);
+	if (err)
 		goto err;
 
-	gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
-			  GNTMAP_host_map, tx_ring_ref, vif->domid);
-
-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
-
-	if (op.status) {
-		netdev_warn(vif->dev,
-			    "failed to map tx ring. err=%d status=%d\n",
-			    err, op.status);
-		err = op.status;
-		goto err;
-	}
-
-	vif->tx_shmem_ref = tx_ring_ref;
-	vif->tx_shmem_handle = op.handle;
-
-	txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
+	txs = (struct xen_netif_tx_sring *)addr;
 	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
 
-	gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
-			  GNTMAP_host_map, rx_ring_ref, vif->domid);
-
-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
-
-	if (op.status) {
-		netdev_warn(vif->dev,
-			    "failed to map rx ring. err=%d status=%d\n",
-			    err, op.status);
-		err = op.status;
+	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+				     rx_ring_ref, &addr);
+	if (err)
 		goto err;
-	}
-	vif->rx_shmem_ref = rx_ring_ref;
-	vif->rx_shmem_handle = op.handle;
-	vif->rx_req_cons_peek = 0;
-
-	rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
+	rxs = (struct xen_netif_rx_sring *)addr;
 	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
 
+	vif->rx_req_cons_peek = 0;
+
 	return 0;
 
 err:
@@ -1672,15 +1633,13 @@ static int __init netback_init(void)
 	int rc = 0;
 	int group;
 
-	if (!xen_pv_domain())
+	if (!xen_domain())
 		return -ENODEV;
 
 	xen_netbk_group_nr = num_online_cpus();
 	xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
-	if (!xen_netbk) {
-		printk(KERN_ALERT "%s: out of memory\n", __func__);
+	if (!xen_netbk)
 		return -ENOMEM;
-	}
 
 	for (group = 0; group < xen_netbk_group_nr; group++) {
 		struct xen_netbk *netbk = &xen_netbk[group];
@@ -1702,7 +1661,7 @@ static int __init netback_init(void)
 					     "netback/%u", group);
 
 		if (IS_ERR(netbk->task)) {
-			printk(KERN_ALERT "kthread_run() fails at netback\n");
+			printk(KERN_ALERT "kthread_create() fails at netback\n");
 			del_timer(&netbk->net_timer);
 			rc = PTR_ERR(netbk->task);
 			goto failed_init;
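A note on the recurring tx-path change above: before this patch the backend smuggled a pending-ring index through a frag by casting it into frag->page. With the move to the paged-frag accessors (skb_frag_page(), skb_frag_size(), __skb_fill_page_desc()) that cast is no longer acceptable, so the index is parked in page_offset via the new frag_get_pending_idx()/frag_set_pending_idx() helpers, with INVALID_PENDING_IDX (0xFFFF) as the "no slot" sentinel; this is safe because every valid index is below MAX_PENDING_REQS (256). What follows is a minimal standalone sketch of that idiom, not the kernel code itself: the cut-down skb_frag_t is an assumption for illustration only (the real definition lives in include/linux/skbuff.h).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the kernel's skb_frag_t: only page_offset matters
 * to the helpers; page and size are shown for shape. */
typedef struct skb_frag {
	void *page;
	uint32_t page_offset;	/* doubles as pending_idx storage pre-fill */
	uint32_t size;
} skb_frag_t;

#define MAX_PENDING_REQS 256
/* Discriminate from any valid pending_idx value (all < 256). */
#define INVALID_PENDING_IDX 0xFFFF

static uint16_t frag_get_pending_idx(skb_frag_t *frag)
{
	return (uint16_t)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, uint16_t pending_idx)
{
	frag->page_offset = pending_idx;
}

int main(void)
{
	skb_frag_t frag = { 0, 0, 0 };

	/* tx_build_gops: head data fits, so frags[0] carries no slot. */
	frag_set_pending_idx(&frag, INVALID_PENDING_IDX);
	assert(frag_get_pending_idx(&frag) == INVALID_PENDING_IDX);

	/* Otherwise a real slot is parked in the frag until fill_frags
	 * overwrites it with the actual page/offset/size. */
	frag_set_pending_idx(&frag, 42);
	assert(frag_get_pending_idx(&frag) < MAX_PENDING_REQS);
	printf("pending_idx = %u\n", frag_get_pending_idx(&frag));
	return 0;
}

The design point is that page_offset is dead weight while the grant copy is still pending: xen_netbk_fill_frags() reads the stashed index back with frag_get_pending_idx() and then calls __skb_fill_page_desc(), which installs the real page, offset and size in one step, so the temporary index never leaks to the rest of the stack.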