Diffstat (limited to 'drivers/net/vmxnet3/vmxnet3_drv.c')
| -rw-r--r-- | drivers/net/vmxnet3/vmxnet3_drv.c | 1117 |
1 file changed, 607 insertions, 510 deletions
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 65860a99832..b76f7dcde0d 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -24,6 +24,7 @@ * */ +#include <linux/module.h> #include <net/ip6_checksum.h> #include "vmxnet3_int.h" @@ -42,11 +43,10 @@ static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = { MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); -static atomic_t devices_found; - -#define VMXNET3_MAX_DEVICES 10 static int enable_mq = 1; -static int irq_share_mode; + +static void +vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); /* * Enable/Disable the given intr @@ -139,15 +139,18 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) { u32 ret; int i; + unsigned long flags; + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + adapter->link_speed = ret >> 16; if (ret & 1) { /* Link is up. */ - printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", - adapter->netdev->name, adapter->link_speed); - if (!netif_carrier_ok(adapter->netdev)) - netif_carrier_on(adapter->netdev); + netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", + adapter->link_speed); + netif_carrier_on(adapter->netdev); if (affectTxQueue) { for (i = 0; i < adapter->num_tx_queues; i++) @@ -155,10 +158,8 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) adapter); } } else { - printk(KERN_INFO "%s: NIC Link is Down\n", - adapter->netdev->name); - if (netif_carrier_ok(adapter->netdev)) - netif_carrier_off(adapter->netdev); + netdev_info(adapter->netdev, "NIC Link is Down\n"); + netif_carrier_off(adapter->netdev); if (affectTxQueue) { for (i = 0; i < adapter->num_tx_queues; i++) @@ -171,6 +172,7 @@ static void vmxnet3_process_events(struct vmxnet3_adapter *adapter) { int i; + unsigned long flags; u32 events = le32_to_cpu(adapter->shared->ecr); if (!events) return; @@ -183,8 +185,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) /* Check if there is an error on xmit/recv queues */ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tqd_start[i].status.stopped) @@ -309,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, struct pci_dev *pdev) { if (tbi->map_type == VMXNET3_MAP_SINGLE) - pci_unmap_single(pdev, tbi->dma_addr, tbi->len, + dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, PCI_DMA_TODEVICE); else if (tbi->map_type == VMXNET3_MAP_PAGE) - pci_unmap_page(pdev, tbi->dma_addr, tbi->len, + dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, PCI_DMA_TODEVICE); else BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); @@ -395,10 +399,8 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { struct vmxnet3_tx_buf_info *tbi; - union Vmxnet3_GenericDesc *gdesc; tbi = tq->buf_info + tq->tx_ring.next2comp; - gdesc = tq->tx_ring.base + tq->tx_ring.next2comp; vmxnet3_unmap_tx_buf(tbi, adapter->pdev); if (tbi->skb) { @@ -427,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { if (tq->tx_ring.base) { - pci_free_consistent(adapter->pdev, tq->tx_ring.size * - sizeof(struct 
Vmxnet3_TxDesc), - tq->tx_ring.base, tq->tx_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * + sizeof(struct Vmxnet3_TxDesc), + tq->tx_ring.base, tq->tx_ring.basePA); tq->tx_ring.base = NULL; } if (tq->data_ring.base) { - pci_free_consistent(adapter->pdev, tq->data_ring.size * - sizeof(struct Vmxnet3_TxDataDesc), - tq->data_ring.base, tq->data_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size * + sizeof(struct Vmxnet3_TxDataDesc), + tq->data_ring.base, tq->data_ring.basePA); tq->data_ring.base = NULL; } if (tq->comp_ring.base) { - pci_free_consistent(adapter->pdev, tq->comp_ring.size * - sizeof(struct Vmxnet3_TxCompDesc), - tq->comp_ring.base, tq->comp_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * + sizeof(struct Vmxnet3_TxCompDesc), + tq->comp_ring.base, tq->comp_ring.basePA); tq->comp_ring.base = NULL; } - kfree(tq->buf_info); - tq->buf_info = NULL; + if (tq->buf_info) { + dma_free_coherent(&adapter->pdev->dev, + tq->tx_ring.size * sizeof(tq->buf_info[0]), + tq->buf_info, tq->buf_info_pa); + tq->buf_info = NULL; + } } @@ -494,45 +500,40 @@ static int vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { + size_t sz; + BUG_ON(tq->tx_ring.base || tq->data_ring.base || tq->comp_ring.base || tq->buf_info); - tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size - * sizeof(struct Vmxnet3_TxDesc), - &tq->tx_ring.basePA); + tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), + &tq->tx_ring.basePA, GFP_KERNEL); if (!tq->tx_ring.base) { - printk(KERN_ERR "%s: failed to allocate tx ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate tx ring\n"); goto err; } - tq->data_ring.base = pci_alloc_consistent(adapter->pdev, - tq->data_ring.size * - sizeof(struct Vmxnet3_TxDataDesc), - &tq->data_ring.basePA); + tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc), + &tq->data_ring.basePA, GFP_KERNEL); if (!tq->data_ring.base) { - printk(KERN_ERR "%s: failed to allocate data ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate data ring\n"); goto err; } - tq->comp_ring.base = pci_alloc_consistent(adapter->pdev, - tq->comp_ring.size * - sizeof(struct Vmxnet3_TxCompDesc), - &tq->comp_ring.basePA); + tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), + &tq->comp_ring.basePA, GFP_KERNEL); if (!tq->comp_ring.base) { - printk(KERN_ERR "%s: failed to allocate tx comp ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); goto err; } - tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]), - GFP_KERNEL); - if (!tq->buf_info) { - printk(KERN_ERR "%s: failed to allocate tx bufinfo\n", - adapter->netdev->name); + sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); + tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, + &tq->buf_info_pa, GFP_KERNEL); + if (!tq->buf_info) goto err; - } return 0; @@ -565,7 +566,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; u32 val; - while (num_allocated < num_to_alloc) { + while (num_allocated <= num_to_alloc) { struct vmxnet3_rx_buf_info *rbi; union Vmxnet3_GenericDesc *gd; @@ -574,16 +575,16 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, if 
(rbi->buf_type == VMXNET3_RX_BUF_SKB) { if (rbi->skb == NULL) { - rbi->skb = dev_alloc_skb(rbi->len + - NET_IP_ALIGN); + rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, + rbi->len, + GFP_KERNEL); if (unlikely(rbi->skb == NULL)) { rq->stats.rx_buf_alloc_failure++; break; } - rbi->skb->dev = adapter->netdev; - skb_reserve(rbi->skb, NET_IP_ALIGN); - rbi->dma_addr = pci_map_single(adapter->pdev, + rbi->dma_addr = dma_map_single( + &adapter->pdev->dev, rbi->skb->data, rbi->len, PCI_DMA_FROMDEVICE); } else { @@ -600,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, rq->stats.rx_buf_alloc_failure++; break; } - rbi->dma_addr = pci_map_page(adapter->pdev, + rbi->dma_addr = dma_map_page( + &adapter->pdev->dev, rbi->page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); } else { @@ -611,18 +613,22 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, BUG_ON(rbi->dma_addr == 0); gd->rxd.addr = cpu_to_le64(rbi->dma_addr); - gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT) + gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | rbi->len); + /* Fill the last buffer but dont mark it ready, or else the + * device will think that the queue is full */ + if (num_allocated == num_to_alloc) + break; + + gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT); num_allocated++; vmxnet3_cmd_ring_adv_next2fill(ring); } - rq->uncommitted[ring_idx] += num_allocated; - dev_dbg(&adapter->netdev->dev, - "alloc_rx_buf: %d allocated, next2fill %u, next2comp " - "%u, uncommited %u\n", num_allocated, ring->next2fill, - ring->next2comp, rq->uncommitted[ring_idx]); + netdev_dbg(adapter->netdev, + "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n", + num_allocated, ring->next2fill, ring->next2comp); /* so that the device can distinguish a full ring and an empty ring */ BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); @@ -640,10 +646,11 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); - frag->page = rbi->page; + __skb_frag_set_page(frag, rbi->page); frag->page_offset = 0; - frag->size = rcd->len; - skb->data_len += frag->size; + skb_frag_size_set(frag, rcd->len); + skb->data_len += rcd->len; + skb->truesize += PAGE_SIZE; skb_shinfo(skb)->nr_frags++; } @@ -678,7 +685,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi = tq->buf_info + tq->tx_ring.next2fill; tbi->map_type = VMXNET3_MAP_NONE; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: 0x%Lx 0x%x 0x%x\n", tq->tx_ring.next2fill, le64_to_cpu(ctx->sop_txd->txd.addr), @@ -705,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi = tq->buf_info + tq->tx_ring.next2fill; tbi->map_type = VMXNET3_MAP_SINGLE; - tbi->dma_addr = pci_map_single(adapter->pdev, + tbi->dma_addr = dma_map_single(&adapter->pdev->dev, skb->data + buf_offset, buf_size, PCI_DMA_TODEVICE); @@ -718,7 +725,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, gdesc->dword[2] = cpu_to_le32(dw2); gdesc->dword[3] = 0; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: 0x%Lx 0x%x 0x%x\n", tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); @@ -730,29 +737,44 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + const struct skb_frag_struct *frag = 
&skb_shinfo(skb)->frags[i]; + u32 buf_size; - tbi = tq->buf_info + tq->tx_ring.next2fill; - tbi->map_type = VMXNET3_MAP_PAGE; - tbi->dma_addr = pci_map_page(adapter->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); + buf_offset = 0; + len = skb_frag_size(frag); + while (len) { + tbi = tq->buf_info + tq->tx_ring.next2fill; + if (len < VMXNET3_MAX_TX_BUF_SIZE) { + buf_size = len; + dw2 |= len; + } else { + buf_size = VMXNET3_MAX_TX_BUF_SIZE; + /* spec says that for TxDesc.len, 0 == 2^14 */ + } + tbi->map_type = VMXNET3_MAP_PAGE; + tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, + buf_offset, buf_size, + DMA_TO_DEVICE); - tbi->len = frag->size; + tbi->len = buf_size; - gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; - BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); + gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; + BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); - gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); - gdesc->dword[2] = cpu_to_le32(dw2 | frag->size); - gdesc->dword[3] = 0; + gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); + gdesc->dword[2] = cpu_to_le32(dw2); + gdesc->dword[3] = 0; - dev_dbg(&adapter->netdev->dev, - "txd[%u]: 0x%llu %u %u\n", - tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), - le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); - vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); - dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; + netdev_dbg(adapter->netdev, + "txd[%u]: 0x%llu %u %u\n", + tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), + le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); + vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); + dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; + + len -= buf_size; + buf_offset += buf_size; + } } ctx->eop_txd = gdesc; @@ -798,42 +820,29 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, { struct Vmxnet3_TxDataDesc *tdd; - if (ctx->mss) { + if (ctx->mss) { /* TSO */ ctx->eth_ip_hdr_size = skb_transport_offset(skb); - ctx->l4_hdr_size = ((struct tcphdr *) - skb_transport_header(skb))->doff * 4; + ctx->l4_hdr_size = tcp_hdrlen(skb); ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; } else { - unsigned int pull_size; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - ctx->eth_ip_hdr_size = skb_transport_offset(skb); + ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); if (ctx->ipv4) { - struct iphdr *iph = (struct iphdr *) - skb_network_header(skb); - if (iph->protocol == IPPROTO_TCP) { - pull_size = ctx->eth_ip_hdr_size + - sizeof(struct tcphdr); - - if (unlikely(!pskb_may_pull(skb, - pull_size))) { - goto err; - } - ctx->l4_hdr_size = ((struct tcphdr *) - skb_transport_header(skb))->doff * 4; - } else if (iph->protocol == IPPROTO_UDP) { - ctx->l4_hdr_size = - sizeof(struct udphdr); - } else { + const struct iphdr *iph = ip_hdr(skb); + + if (iph->protocol == IPPROTO_TCP) + ctx->l4_hdr_size = tcp_hdrlen(skb); + else if (iph->protocol == IPPROTO_UDP) + ctx->l4_hdr_size = sizeof(struct udphdr); + else ctx->l4_hdr_size = 0; - } } else { /* for simplicity, don't copy L4 headers */ ctx->l4_hdr_size = 0; } - ctx->copy_size = ctx->eth_ip_hdr_size + - ctx->l4_hdr_size; + ctx->copy_size = min(ctx->eth_ip_hdr_size + + ctx->l4_hdr_size, skb->len); } else { ctx->eth_ip_hdr_size = 0; ctx->l4_hdr_size = 0; @@ -856,7 +865,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, tdd = tq->data_ring.base + tq->tx_ring.next2fill; memcpy(tdd->data, skb->data, ctx->copy_size); - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "copy %u bytes to 
dataRing[%u]\n", ctx->copy_size, tq->tx_ring.next2fill); return 1; @@ -870,25 +879,40 @@ static void vmxnet3_prepare_tso(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx) { - struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb); + struct tcphdr *tcph = tcp_hdr(skb); + if (ctx->ipv4) { - struct iphdr *iph = (struct iphdr *)skb_network_header(skb); + struct iphdr *iph = ip_hdr(skb); + iph->check = 0; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb); + struct ipv6hdr *iph = ipv6_hdr(skb); + tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, IPPROTO_TCP, 0); } } +static int txd_estimate(const struct sk_buff *skb) +{ + int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + + count += VMXNET3_TXD_NEEDED(skb_frag_size(frag)); + } + return count; +} /* * Transmits a pkt thru a given tq * Returns: * NETDEV_TX_OK: descriptors are setup successfully - * NETDEV_TX_OK: error occured, the pkt is dropped + * NETDEV_TX_OK: error occurred, the pkt is dropped * NETDEV_TX_BUSY: tx ring is full, queue is stopped * * Side-effects: @@ -911,11 +935,9 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, union Vmxnet3_GenericDesc tempTxDesc; #endif - /* conservatively estimate # of descriptors to use */ - count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + - skb_shinfo(skb)->nr_frags + 1; + count = txd_estimate(skb); - ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP)); + ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); ctx.mss = skb_shinfo(skb)->gso_size; if (ctx.mss) { @@ -949,7 +971,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { tq->stats.tx_ring_full++; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "tx queue stopped on %s, next2comp %u" " next2fill %u\n", adapter->netdev->name, tq->tx_ring.next2comp, tq->tx_ring.next2fill); @@ -980,7 +1002,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, } } else { tq->stats.drop_hdr_inspect_err++; - goto drop_pkt; + goto unlock_drop_pkt; } /* fill tx descs related to addr & len */ @@ -1032,9 +1054,9 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, (struct Vmxnet3_TxDesc *)ctx.sop_txd); gdesc = ctx.sop_txd; #endif - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", - (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - + (u32)(ctx.sop_txd - tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); @@ -1052,9 +1074,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, hdr_too_big: tq->stats.drop_oversized_hdr++; +unlock_drop_pkt: + spin_unlock_irqrestore(&tq->tx_lock, flags); drop_pkt: tq->stats.drop_total++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1064,10 +1088,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - BUG_ON(skb->queue_mapping > adapter->num_tx_queues); - return vmxnet3_tq_xmit(skb, - &adapter->tx_queue[skb->queue_mapping], - adapter, netdev); + BUG_ON(skb->queue_mapping > adapter->num_tx_queues); + return vmxnet3_tq_xmit(skb, + &adapter->tx_queue[skb->queue_mapping], + adapter, netdev); } @@ -1076,7 +1100,7 @@ vmxnet3_rx_csum(struct 
vmxnet3_adapter *adapter, struct sk_buff *skb, union Vmxnet3_GenericDesc *gdesc) { - if (!gdesc->rcd.cnc && adapter->rxcsum) { + if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { /* typical case: TCP/UDP over IP and both csums are correct */ if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) { @@ -1129,8 +1153,11 @@ static int vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter, int quota) { - static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2}; + static const u32 rxprod_reg[2] = { + VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 + }; u32 num_rxd = 0; + bool skip_page_frags = false; struct Vmxnet3_RxCompDesc *rcd; struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; #ifdef __BIG_ENDIAN_BITFIELD @@ -1141,11 +1168,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, &rxComp); while (rcd->gen == rq->comp_ring.gen) { struct vmxnet3_rx_buf_info *rbi; - struct sk_buff *skb; + struct sk_buff *skb, *new_skb = NULL; + struct page *new_page = NULL; int num_to_alloc; struct Vmxnet3_RxDesc *rxd; u32 idx, ring_idx; - + struct vmxnet3_cmd_ring *ring = NULL; if (num_rxd >= quota) { /* we may stop even before we see the EOP desc of * the current pkt @@ -1156,6 +1184,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); idx = rcd->rxdIdx; ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; + ring = rq->rx_ring + ring_idx; vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, &rxCmdDesc); rbi = rq->buf_info[ring_idx] + idx; @@ -1178,88 +1207,139 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, if (unlikely(rcd->len == 0)) { /* Pretend the rx buffer is skipped. */ BUG_ON(!(rcd->sop && rcd->eop)); - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "rxRing[%u][%u] 0 length\n", ring_idx, idx); goto rcd_done; } + skip_page_frags = false; ctx->skb = rbi->skb; - rbi->skb = NULL; + new_skb = netdev_alloc_skb_ip_align(adapter->netdev, + rbi->len); + if (new_skb == NULL) { + /* Skb allocation failed, do not handover this + * skb to stack. Reuse it. Drop the existing pkt + */ + rq->stats.rx_buf_alloc_failure++; + ctx->skb = NULL; + rq->stats.drop_total++; + skip_page_frags = true; + goto rcd_done; + } - pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, + dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, + rbi->len, PCI_DMA_FROMDEVICE); +#ifdef VMXNET3_RSS + if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && + (adapter->netdev->features & NETIF_F_RXHASH)) + skb_set_hash(ctx->skb, + le32_to_cpu(rcd->rssHash), + PKT_HASH_TYPE_L3); +#endif skb_put(ctx->skb, rcd->len); + + /* Immediate refill */ + rbi->skb = new_skb; + rbi->dma_addr = dma_map_single(&adapter->pdev->dev, + rbi->skb->data, rbi->len, + PCI_DMA_FROMDEVICE); + rxd->addr = cpu_to_le64(rbi->dma_addr); + rxd->len = rbi->len; + } else { - BUG_ON(ctx->skb == NULL); + BUG_ON(ctx->skb == NULL && !skip_page_frags); + /* non SOP buffer must be type 1 in most cases */ - if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) { - BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); + BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE); + BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); - if (rcd->len) { - pci_unmap_page(adapter->pdev, - rbi->dma_addr, rbi->len, - PCI_DMA_FROMDEVICE); + /* If an sop buffer was dropped, skip all + * following non-sop fragments. They will be reused. 
+ */ + if (skip_page_frags) + goto rcd_done; - vmxnet3_append_frag(ctx->skb, rcd, rbi); - rbi->page = NULL; - } - } else { - /* - * The only time a non-SOP buffer is type 0 is - * when it's EOP and error flag is raised, which - * has already been handled. + new_page = alloc_page(GFP_ATOMIC); + if (unlikely(new_page == NULL)) { + /* Replacement page frag could not be allocated. + * Reuse this page. Drop the pkt and free the + * skb which contained this page as a frag. Skip + * processing all the following non-sop frags. */ - BUG_ON(true); + rq->stats.rx_buf_alloc_failure++; + dev_kfree_skb(ctx->skb); + ctx->skb = NULL; + skip_page_frags = true; + goto rcd_done; + } + + if (rcd->len) { + dma_unmap_page(&adapter->pdev->dev, + rbi->dma_addr, rbi->len, + PCI_DMA_FROMDEVICE); + + vmxnet3_append_frag(ctx->skb, rcd, rbi); } + + /* Immediate refill */ + rbi->page = new_page; + rbi->dma_addr = dma_map_page(&adapter->pdev->dev, + rbi->page, + 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + rxd->addr = cpu_to_le64(rbi->dma_addr); + rxd->len = rbi->len; } + skb = ctx->skb; if (rcd->eop) { skb->len += skb->data_len; - skb->truesize += skb->data_len; vmxnet3_rx_csum(adapter, skb, (union Vmxnet3_GenericDesc *)rcd); skb->protocol = eth_type_trans(skb, adapter->netdev); - if (unlikely(adapter->vlan_grp && rcd->ts)) { - vlan_hwaccel_receive_skb(skb, - adapter->vlan_grp, rcd->tci); - } else { + if (unlikely(rcd->ts)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci); + + if (adapter->netdev->features & NETIF_F_LRO) netif_receive_skb(skb); - } + else + napi_gro_receive(&rq->napi, skb); ctx->skb = NULL; } rcd_done: - /* device may skip some rx descs */ - rq->rx_ring[ring_idx].next2comp = idx; - VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp, - rq->rx_ring[ring_idx].size); - - /* refill rx buffers frequently to avoid starving the h/w */ - num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring + - ring_idx); - if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq, - ring_idx, adapter))) { - vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc, - adapter); - - /* if needed, update the register */ - if (unlikely(rq->shared->updateRxProd)) { - VMXNET3_WRITE_BAR0_REG(adapter, - rxprod_reg[ring_idx] + rq->qid * 8, - rq->rx_ring[ring_idx].next2fill); - rq->uncommitted[ring_idx] = 0; - } + /* device may have skipped some rx descs */ + ring->next2comp = idx; + num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); + ring = rq->rx_ring + ring_idx; + while (num_to_alloc) { + vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, + &rxCmdDesc); + BUG_ON(!rxd->addr); + + /* Recv desc is ready to be used by the device */ + rxd->gen = ring->gen; + vmxnet3_cmd_ring_adv_next2fill(ring); + num_to_alloc--; + } + + /* if needed, update the register */ + if (unlikely(rq->shared->updateRxProd)) { + VMXNET3_WRITE_BAR0_REG(adapter, + rxprod_reg[ring_idx] + rq->qid * 8, + ring->next2fill); } vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); vmxnet3_getRxComp(rcd, - &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); + &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); } return num_rxd; @@ -1283,13 +1363,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && rq->buf_info[ring_idx][i].skb) { - pci_unmap_single(adapter->pdev, rxd->addr, + dma_unmap_single(&adapter->pdev->dev, rxd->addr, rxd->len, PCI_DMA_FROMDEVICE); dev_kfree_skb(rq->buf_info[ring_idx][i].skb); rq->buf_info[ring_idx][i].skb = NULL; } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && 
rq->buf_info[ring_idx][i].page) { - pci_unmap_page(adapter->pdev, rxd->addr, + dma_unmap_page(&adapter->pdev->dev, rxd->addr, rxd->len, PCI_DMA_FROMDEVICE); put_page(rq->buf_info[ring_idx][i].page); rq->buf_info[ring_idx][i].page = NULL; @@ -1299,7 +1379,6 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; rq->rx_ring[ring_idx].next2fill = rq->rx_ring[ring_idx].next2comp = 0; - rq->uncommitted[ring_idx] = 0; } rq->comp_ring.gen = VMXNET3_INIT_GEN; @@ -1317,8 +1396,8 @@ vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter) } -void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, - struct vmxnet3_adapter *adapter) +static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter) { int i; int j; @@ -1332,25 +1411,31 @@ void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, } - kfree(rq->buf_info[0]); - for (i = 0; i < 2; i++) { if (rq->rx_ring[i].base) { - pci_free_consistent(adapter->pdev, rq->rx_ring[i].size - * sizeof(struct Vmxnet3_RxDesc), - rq->rx_ring[i].base, - rq->rx_ring[i].basePA); + dma_free_coherent(&adapter->pdev->dev, + rq->rx_ring[i].size + * sizeof(struct Vmxnet3_RxDesc), + rq->rx_ring[i].base, + rq->rx_ring[i].basePA); rq->rx_ring[i].base = NULL; } rq->buf_info[i] = NULL; } if (rq->comp_ring.base) { - pci_free_consistent(adapter->pdev, rq->comp_ring.size * - sizeof(struct Vmxnet3_RxCompDesc), - rq->comp_ring.base, rq->comp_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size + * sizeof(struct Vmxnet3_RxCompDesc), + rq->comp_ring.base, rq->comp_ring.basePA); rq->comp_ring.base = NULL; } + + if (rq->buf_info[0]) { + size_t sz = sizeof(struct vmxnet3_rx_buf_info) * + (rq->rx_ring[0].size + rq->rx_ring[1].size); + dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], + rq->buf_info_pa); + } } @@ -1380,7 +1465,6 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, /* reset internal state and allocate buffers for both rings */ for (i = 0; i < 2; i++) { rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; - rq->uncommitted[i] = 0; memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc)); @@ -1436,32 +1520,33 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) for (i = 0; i < 2; i++) { sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); - rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz, - &rq->rx_ring[i].basePA); + rq->rx_ring[i].base = dma_alloc_coherent( + &adapter->pdev->dev, sz, + &rq->rx_ring[i].basePA, + GFP_KERNEL); if (!rq->rx_ring[i].base) { - printk(KERN_ERR "%s: failed to allocate rx ring %d\n", - adapter->netdev->name, i); + netdev_err(adapter->netdev, + "failed to allocate rx ring %d\n", i); goto err; } } sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); - rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz, - &rq->comp_ring.basePA); + rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, + &rq->comp_ring.basePA, + GFP_KERNEL); if (!rq->comp_ring.base) { - printk(KERN_ERR "%s: failed to allocate rx comp ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); goto err; } sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + rq->rx_ring[1].size); - bi = kzalloc(sz, GFP_KERNEL); - if (!bi) { - printk(KERN_ERR "%s: failed to allocate rx bufinfo\n", - adapter->netdev->name); + bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, + GFP_KERNEL); + if (!bi) goto err; - } + rq->buf_info[0] = bi; 
rq->buf_info[1] = bi + rq->rx_ring[0].size; @@ -1677,11 +1762,20 @@ vmxnet3_netpoll(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) - vmxnet3_disable_all_intrs(adapter); - - vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size); - vmxnet3_enable_all_intrs(adapter); + switch (adapter->intr.type) { +#ifdef CONFIG_PCI_MSI + case VMXNET3_IT_MSIX: { + int i; + for (i = 0; i < adapter->num_rx_queues; i++) + vmxnet3_msix_rx(0, &adapter->rx_queue[i]); + break; + } +#endif + case VMXNET3_IT_MSI: + default: + vmxnet3_intr(0, adapter->netdev); + break; + } } #endif /* CONFIG_NET_POLL_CONTROLLER */ @@ -1744,9 +1838,10 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) adapter->rx_queue[i].name, &(adapter->rx_queue[i])); if (err) { - printk(KERN_ERR "Failed to request irq for MSIX" - ", %s, error %d\n", - adapter->rx_queue[i].name, err); + netdev_err(adapter->netdev, + "Failed to request irq for MSIX, " + "%s, error %d\n", + adapter->rx_queue[i].name, err); return err; } @@ -1775,8 +1870,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) #endif intr->num_intrs = vector + 1; if (err) { - printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" - ":%d\n", adapter->netdev->name, intr->type, err); + netdev_err(adapter->netdev, + "Failed to request irq (intr type:%d), error %d\n", + intr->type, err); } else { /* Number of rx queues will not change after this */ for (i = 0; i < adapter->num_rx_queues; i++) { @@ -1797,9 +1893,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) adapter->rx_queue[0].comp_ring.intr_idx = 0; } - printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " - "allocated\n", adapter->netdev->name, intr->type, - intr->mask_mode, intr->num_intrs); + netdev_info(adapter->netdev, + "intr type %u, mode %u, %u vectors allocated\n", + intr->type, intr->mask_mode, intr->num_intrs); } return err; @@ -1845,112 +1941,66 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) free_irq(adapter->pdev->irq, adapter->netdev); break; default: - BUG_ON(true); + BUG(); } } + static void -vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) +vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) { - struct vmxnet3_adapter *adapter = netdev_priv(netdev); - struct Vmxnet3_DriverShared *shared = adapter->shared; u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + u16 vid; - if (grp) { - /* add vlan rx stripping. */ - if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { - int i; - struct Vmxnet3_DSDevRead *devRead = &shared->devRead; - adapter->vlan_grp = grp; - - /* update FEATURES to device */ - devRead->misc.uptFeatures |= UPT1_F_RXVLAN; - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_FEATURE); - /* - * Clear entire vfTable; then enable untagged pkts. - * Note: setting one entry in vfTable to non-zero turns - * on VLAN rx filtering. - */ - for (i = 0; i < VMXNET3_VFT_SIZE; i++) - vfTable[i] = 0; + /* allow untagged pkts */ + VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); - VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_VLAN_FILTERS); - } else { - printk(KERN_ERR "%s: vlan_rx_register when device has " - "no NETIF_F_HW_VLAN_RX\n", netdev->name); - } - } else { - /* remove vlan rx stripping. 
*/ - struct Vmxnet3_DSDevRead *devRead = &shared->devRead; - adapter->vlan_grp = NULL; - - if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { - int i; - - for (i = 0; i < VMXNET3_VFT_SIZE; i++) { - /* clear entire vfTable; this also disables - * VLAN rx filtering - */ - vfTable[i] = 0; - } - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_VLAN_FILTERS); - - /* update FEATURES to device */ - devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_FEATURE); - } - } + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); } -static void -vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) +static int +vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { - if (adapter->vlan_grp) { - u16 vid; + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + if (!(netdev->flags & IFF_PROMISC)) { u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; - bool activeVlan = false; + unsigned long flags; - for (vid = 0; vid < VLAN_N_VID; vid++) { - if (vlan_group_get_device(adapter->vlan_grp, vid)) { - VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); - activeVlan = true; - } - } - if (activeVlan) { - /* continue to allow untagged pkts */ - VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); - } + VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); + spin_lock_irqsave(&adapter->cmd_lock, flags); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); } + + set_bit(vid, adapter->active_vlans); + + return 0; } -static void -vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int +vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; - VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_VLAN_FILTERS); -} + if (!(netdev->flags & IFF_PROMISC)) { + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + unsigned long flags; + VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); + spin_lock_irqsave(&adapter->cmd_lock, flags); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + } -static void -vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -{ - struct vmxnet3_adapter *adapter = netdev_priv(netdev); - u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + clear_bit(vid, adapter->active_vlans); - VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_VLAN_FILTERS); + return 0; } @@ -1981,13 +2031,21 @@ static void vmxnet3_set_mc(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); + unsigned long flags; struct Vmxnet3_RxFilterConf *rxConf = &adapter->shared->devRead.rxFilterConf; u8 *new_table = NULL; + dma_addr_t new_table_pa = 0; u32 new_mode = VMXNET3_RXM_UCAST; - if (netdev->flags & IFF_PROMISC) + if (netdev->flags & IFF_PROMISC) { + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable)); + new_mode |= VMXNET3_RXM_PROMISC; + } else { + vmxnet3_restore_vlan(adapter); + } if (netdev->flags & IFF_BROADCAST) new_mode |= VMXNET3_RXM_BCAST; @@ -2001,11 +2059,15 @@ vmxnet3_set_mc(struct net_device *netdev) new_mode |= VMXNET3_RXM_MCAST; 
rxConf->mfTableLen = cpu_to_le16( netdev_mc_count(netdev) * ETH_ALEN); - rxConf->mfTablePA = cpu_to_le64(virt_to_phys( - new_table)); + new_table_pa = dma_map_single( + &adapter->pdev->dev, + new_table, + rxConf->mfTableLen, + PCI_DMA_TODEVICE); + rxConf->mfTablePA = cpu_to_le64(new_table_pa); } else { - printk(KERN_INFO "%s: failed to copy mcast list" - ", setting ALL_MULTI\n", netdev->name); + netdev_info(netdev, "failed to copy mcast list" + ", setting ALL_MULTI\n"); new_mode |= VMXNET3_RXM_ALL_MULTI; } } @@ -2016,16 +2078,24 @@ vmxnet3_set_mc(struct net_device *netdev) rxConf->mfTablePA = 0; } + spin_lock_irqsave(&adapter->cmd_lock, flags); if (new_mode != rxConf->rxMode) { rxConf->rxMode = cpu_to_le32(new_mode); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); } VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_MAC_FILTERS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); - kfree(new_table); + if (new_table) { + dma_unmap_single(&adapter->pdev->dev, new_table_pa, + rxConf->mfTableLen, PCI_DMA_TODEVICE); + kfree(new_table); + } } void @@ -2065,21 +2135,19 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); - devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter)); + devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa); devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); /* set up feature flags */ - if (adapter->rxcsum) + if (adapter->netdev->features & NETIF_F_RXCSUM) devRead->misc.uptFeatures |= UPT1_F_RXCSUM; - if (adapter->lro) { + if (adapter->netdev->features & NETIF_F_LRO) { devRead->misc.uptFeatures |= UPT1_F_LRO; devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); } - if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && - adapter->vlan_grp) { + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) devRead->misc.uptFeatures |= UPT1_F_RXVLAN; - } devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); @@ -2096,7 +2164,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); - tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info)); + tqc->ddPA = cpu_to_le64(tq->buf_info_pa); tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); @@ -2114,8 +2182,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); - rqc->ddPA = cpu_to_le64(virt_to_phys( - rq->buf_info)); + rqc->ddPA = cpu_to_le64(rq->buf_info_pa); rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); @@ -2131,6 +2198,14 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) if (adapter->rss) { struct UPT1_RSSConf *rssConf = adapter->rss_conf; + static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = { + 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac, + 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28, + 0x35, 0x12, 
0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70, + 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3, + 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9, + }; + devRead->misc.uptFeatures |= UPT1_F_RSS; devRead->misc.numRxQueues = adapter->num_rx_queues; rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | @@ -2140,13 +2215,16 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; - get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize); + memcpy(rssConf->hashKey, rss_key, sizeof(rss_key)); + for (i = 0; i < rssConf->indTableSize; i++) - rssConf->indTable[i] = i % adapter->num_rx_queues; + rssConf->indTable[i] = ethtool_rxfh_indir_default( + i, adapter->num_rx_queues); devRead->rssConfDesc.confVer = 1; - devRead->rssConfDesc.confLen = sizeof(*rssConf); - devRead->rssConfDesc.confPA = virt_to_phys(rssConf); + devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf)); + devRead->rssConfDesc.confPA = + cpu_to_le64(adapter->rss_conf_pa); } #endif /* VMXNET3_RSS */ @@ -2164,6 +2242,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) /* rx filter settings */ devRead->rxFilterConf.rxMode = 0; vmxnet3_restore_vlan(adapter); + vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); + /* the rest are already zeroed */ } @@ -2173,8 +2253,9 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) { int err, i; u32 ret; + unsigned long flags; - dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," + netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," " ring sizes %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, adapter->rx_buf_per_pkt, adapter->tx_queue[0].tx_ring.size, @@ -2184,15 +2265,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) vmxnet3_tq_init_all(adapter); err = vmxnet3_rq_init_all(adapter); if (err) { - printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", - adapter->netdev->name, err); + netdev_err(adapter->netdev, + "Failed to init rx queue error %d\n", err); goto rq_err; } err = vmxnet3_request_irqs(adapter); if (err) { - printk(KERN_ERR "Failed to setup irq for %s: error %d\n", - adapter->netdev->name, err); + netdev_err(adapter->netdev, + "Failed to setup irq for error %d\n", err); goto irq_err; } @@ -2202,13 +2283,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) adapter->shared_pa)); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( adapter->shared_pa)); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV); ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); if (ret != 0) { - printk(KERN_ERR "Failed to activate dev %s: error %u\n", - adapter->netdev->name, ret); + netdev_err(adapter->netdev, + "Failed to activate dev: error %u\n", ret); err = -EINVAL; goto activate_err; } @@ -2251,7 +2334,10 @@ rq_err: void vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) { + unsigned long flags; + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); } @@ -2259,12 +2345,15 @@ int vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) { int i; + unsigned long flags; if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) return 0; + spin_lock_irqsave(&adapter->cmd_lock, flags); 
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); vmxnet3_disable_all_intrs(adapter); for (i = 0; i < adapter->num_rx_queues; i++) @@ -2317,23 +2406,22 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) err = pci_enable_device(pdev); if (err) { - printk(KERN_ERR "Failed to enable adapter %s: error %d\n", - pci_name(pdev), err); + dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err); return err; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { - printk(KERN_ERR "pci_set_consistent_dma_mask failed " - "for adapter %s\n", pci_name(pdev)); + dev_err(&pdev->dev, + "pci_set_consistent_dma_mask failed\n"); err = -EIO; goto err_set_mask; } *dma64 = true; } else { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { - printk(KERN_ERR "pci_set_dma_mask failed for adapter " - "%s\n", pci_name(pdev)); + dev_err(&pdev->dev, + "pci_set_dma_mask failed\n"); err = -EIO; goto err_set_mask; } @@ -2343,8 +2431,8 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) err = pci_request_selected_regions(pdev, (1 << 2) - 1, vmxnet3_driver_name); if (err) { - printk(KERN_ERR "Failed to request region for adapter %s: " - "error %d\n", pci_name(pdev), err); + dev_err(&pdev->dev, + "Failed to request region for adapter: error %d\n", err); goto err_set_mask; } @@ -2354,8 +2442,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) mmio_len = pci_resource_len(pdev, 0); adapter->hw_addr0 = ioremap(mmio_start, mmio_len); if (!adapter->hw_addr0) { - printk(KERN_ERR "Failed to map bar0 for adapter %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to map bar0\n"); err = -EIO; goto err_ioremap; } @@ -2364,8 +2451,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) mmio_len = pci_resource_len(pdev, 1); adapter->hw_addr1 = ioremap(mmio_start, mmio_len); if (!adapter->hw_addr1) { - printk(KERN_ERR "Failed to map bar1 for adapter %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to map bar1\n"); err = -EIO; goto err_bar1; } @@ -2422,7 +2508,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; ring0_size = adapter->rx_queue[0].rx_ring[0].size; ring0_size = (ring0_size + sz - 1) / sz * sz; - ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE / + ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE / sz * sz); ring1_size = adapter->rx_queue[0].rx_ring[1].size; comp_size = ring0_size + ring1_size; @@ -2472,12 +2558,14 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, err = vmxnet3_rq_create(rq, adapter); if (err) { if (i == 0) { - printk(KERN_ERR "Could not allocate any rx" - "queues. Aborting.\n"); + netdev_err(adapter->netdev, + "Could not allocate any rx queues. 
" + "Aborting.\n"); goto queue_err; } else { - printk(KERN_INFO "Number of rx queues changed " - "to : %d.\n", i); + netdev_info(adapter->netdev, + "Number of rx queues changed " + "to : %d.\n", i); adapter->num_rx_queues = i; err = 0; break; @@ -2501,8 +2589,8 @@ vmxnet3_open(struct net_device *netdev) for (i = 0; i < adapter->num_tx_queues; i++) spin_lock_init(&adapter->tx_queue[i].tx_lock); - err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, - VMXNET3_DEF_RX_RING_SIZE, + err = vmxnet3_create_queues(adapter, adapter->tx_ring_size, + adapter->rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); if (err) goto queue_err; @@ -2572,9 +2660,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) return -EINVAL; - if (new_mtu > 1500 && !adapter->jumbo_frame) - return -EINVAL; - netdev->mtu = new_mtu; /* @@ -2593,15 +2678,17 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) vmxnet3_adjust_rx_ring_size(adapter); err = vmxnet3_rq_create_all(adapter); if (err) { - printk(KERN_ERR "%s: failed to re-create rx queues," - " error %d. Closing it.\n", netdev->name, err); + netdev_err(netdev, + "failed to re-create rx queues, " + " error %d. Closing it.\n", err); goto out; } err = vmxnet3_activate_dev(adapter); if (err) { - printk(KERN_ERR "%s: failed to re-activate, error %d. " - "Closing it\n", netdev->name, err); + netdev_err(netdev, + "failed to re-activate, error %d. " + "Closing it\n", err); goto out; } } @@ -2620,28 +2707,16 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) { struct net_device *netdev = adapter->netdev; - netdev->features = NETIF_F_SG | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER | - NETIF_F_TSO | - NETIF_F_TSO6 | + netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO; - - printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro"); - - adapter->rxcsum = true; - adapter->jumbo_frame = true; - adapter->lro = true; - - if (dma64) { - netdev->features |= NETIF_F_HIGHDMA; - printk(" highDMA"); - } - - netdev->vlan_features = netdev->features; - printk("\n"); + if (dma64) + netdev->hw_features |= NETIF_F_HIGHDMA; + netdev->vlan_features = netdev->hw_features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX); + netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; } @@ -2663,46 +2738,35 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) /* * Enable MSIx vectors. * Returns : - * 0 on successful enabling of required vectors, - * VMXNET3_LINUX_MIN_MSIX_VECT when only minumum number of vectors required - * could be enabled. - * number of vectors which can be enabled otherwise (this number is smaller + * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required + * were enabled. 
+ * number of vectors which were enabled otherwise (this number is greater * than VMXNET3_LINUX_MIN_MSIX_VECT) */ static int -vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, - int vectors) -{ - int err = 0, vector_threshold; - vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT; - - while (vectors >= vector_threshold) { - err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, - vectors); - if (!err) { - adapter->intr.num_intrs = vectors; - return 0; - } else if (err < 0) { - printk(KERN_ERR "Failed to enable MSI-X for %s, error" - " %d\n", adapter->netdev->name, err); - vectors = 0; - } else if (err < vector_threshold) { - break; - } else { - /* If fails to enable required number of MSI-x vectors - * try enabling 3 of them. One each for rx, tx and event - */ - vectors = vector_threshold; - printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" - " %d instead\n", vectors, adapter->netdev->name, - vector_threshold); - } +vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec) +{ + int ret = pci_enable_msix_range(adapter->pdev, + adapter->intr.msix_entries, nvec, nvec); + + if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) { + dev_err(&adapter->netdev->dev, + "Failed to enable %d MSI-X, trying %d\n", + nvec, VMXNET3_LINUX_MIN_MSIX_VECT); + + ret = pci_enable_msix_range(adapter->pdev, + adapter->intr.msix_entries, + VMXNET3_LINUX_MIN_MSIX_VECT, + VMXNET3_LINUX_MIN_MSIX_VECT); } - printk(KERN_INFO "Number of MSI-X interrupts which can be allocatedi" - " are lower than min threshold required.\n"); - return err; + if (ret < 0) { + dev_err(&adapter->netdev->dev, + "Failed to enable MSI-X, error: %d\n", ret); + } + + return ret; } @@ -2712,11 +2776,14 @@ static void vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) { u32 cfg; + unsigned long flags; /* intr settings */ + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_CONF_INTR); cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); adapter->intr.type = cfg & 0x3; adapter->intr.mask_mode = (cfg >> 2) & 0x3; @@ -2726,54 +2793,50 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) #ifdef CONFIG_PCI_MSI if (adapter->intr.type == VMXNET3_IT_MSIX) { - int vector, err = 0; - - adapter->intr.num_intrs = (adapter->share_intr == - VMXNET3_INTR_TXSHARE) ? 1 : - adapter->num_tx_queues; - adapter->intr.num_intrs += (adapter->share_intr == - VMXNET3_INTR_BUDDYSHARE) ? 0 : - adapter->num_rx_queues; - adapter->intr.num_intrs += 1; /* for link event */ - - adapter->intr.num_intrs = (adapter->intr.num_intrs > - VMXNET3_LINUX_MIN_MSIX_VECT - ? adapter->intr.num_intrs : - VMXNET3_LINUX_MIN_MSIX_VECT); - - for (vector = 0; vector < adapter->intr.num_intrs; vector++) - adapter->intr.msix_entries[vector].entry = vector; - - err = vmxnet3_acquire_msix_vectors(adapter, - adapter->intr.num_intrs); + int i, nvec; + + nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? + 1 : adapter->num_tx_queues; + nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ? + 0 : adapter->num_rx_queues; + nvec += 1; /* for link event */ + nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ? 
+ nvec : VMXNET3_LINUX_MIN_MSIX_VECT; + + for (i = 0; i < nvec; i++) + adapter->intr.msix_entries[i].entry = i; + + nvec = vmxnet3_acquire_msix_vectors(adapter, nvec); + if (nvec < 0) + goto msix_err; + /* If we cannot allocate one MSIx vector per queue * then limit the number of rx queues to 1 */ - if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { + if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) { if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE - || adapter->num_rx_queues != 2) { + || adapter->num_rx_queues != 1) { adapter->share_intr = VMXNET3_INTR_TXSHARE; - printk(KERN_ERR "Number of rx queues : 1\n"); + netdev_err(adapter->netdev, + "Number of rx queues : 1\n"); adapter->num_rx_queues = 1; - adapter->intr.num_intrs = - VMXNET3_LINUX_MIN_MSIX_VECT; } - return; } - if (!err) - return; + adapter->intr.num_intrs = nvec; + return; + +msix_err: /* If we cannot allocate MSIx vectors use only one rx queue */ - printk(KERN_INFO "Failed to enable MSI-X for %s, error %d." - "#rx queues : 1, try MSI\n", adapter->netdev->name, err); + dev_info(&adapter->pdev->dev, + "Failed to enable MSI-X, error %d. " + "Limiting #rx queues to 1, try MSI.\n", nvec); adapter->intr.type = VMXNET3_IT_MSI; } if (adapter->intr.type == VMXNET3_IT_MSI) { - int err; - err = pci_enable_msi(adapter->pdev); - if (!err) { + if (!pci_enable_msi(adapter->pdev)) { adapter->num_rx_queues = 1; adapter->intr.num_intrs = 1; return; @@ -2782,7 +2845,8 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) #endif /* CONFIG_PCI_MSI */ adapter->num_rx_queues = 1; - printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n"); + dev_info(&adapter->netdev->dev, + "Using INTx interrupt, #Rx queues: 1.\n"); adapter->intr.type = VMXNET3_IT_INTX; /* INT-X related setting */ @@ -2808,7 +2872,7 @@ vmxnet3_tx_timeout(struct net_device *netdev) struct vmxnet3_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); + netdev_err(adapter->netdev, "tx hang\n"); schedule_work(&adapter->work); netif_wake_queue(adapter->netdev); } @@ -2828,12 +2892,12 @@ vmxnet3_reset_work(struct work_struct *data) /* if the device is closed, we must leave it alone */ rtnl_lock(); if (netif_running(adapter->netdev)) { - printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); + netdev_notice(adapter->netdev, "resetting\n"); vmxnet3_quiesce_dev(adapter); vmxnet3_reset_dev(adapter); vmxnet3_activate_dev(adapter); } else { - printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); + netdev_info(adapter->netdev, "already closed\n"); } rtnl_unlock(); @@ -2841,7 +2905,7 @@ vmxnet3_reset_work(struct work_struct *data) } -static int __devinit +static int vmxnet3_probe_device(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -2851,10 +2915,10 @@ vmxnet3_probe_device(struct pci_dev *pdev, .ndo_start_xmit = vmxnet3_xmit_frame, .ndo_set_mac_address = vmxnet3_set_mac_addr, .ndo_change_mtu = vmxnet3_change_mtu, - .ndo_get_stats = vmxnet3_get_stats, + .ndo_set_features = vmxnet3_set_features, + .ndo_get_stats64 = vmxnet3_get_stats64, .ndo_tx_timeout = vmxnet3_tx_timeout, - .ndo_set_multicast_list = vmxnet3_set_mc, - .ndo_vlan_rx_register = vmxnet3_vlan_rx_register, + .ndo_set_rx_mode = vmxnet3_set_mc, .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2871,6 +2935,9 @@ vmxnet3_probe_device(struct pci_dev *pdev, int num_tx_queues; int num_rx_queues; + if (!pci_msi_enabled()) + enable_mq = 0; + #ifdef VMXNET3_RSS 
 	if (enable_mq)
 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
@@ -2878,6 +2945,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	else
 #endif
 		num_rx_queues = 1;
+	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
 
 	if (enable_mq)
 		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
@@ -2885,63 +2953,72 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	else
 		num_tx_queues = 1;
+	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
 	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
 				   max(num_tx_queues, num_rx_queues));
-	printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
-	       num_tx_queues, num_rx_queues);
+	dev_info(&pdev->dev,
+		 "# of Tx queues : %d, # of Rx queues : %d\n",
+		 num_tx_queues, num_rx_queues);
 
-	if (!netdev) {
-		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
-		       "%s\n", pci_name(pdev));
+	if (!netdev)
 		return -ENOMEM;
-	}
 
 	pci_set_drvdata(pdev, netdev);
 	adapter = netdev_priv(netdev);
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 
-	adapter->shared = pci_alloc_consistent(adapter->pdev,
-			  sizeof(struct Vmxnet3_DriverShared),
-			  &adapter->shared_pa);
+	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+
+	spin_lock_init(&adapter->cmd_lock);
+	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
+					     sizeof(struct vmxnet3_adapter),
+					     PCI_DMA_TODEVICE);
+	adapter->shared = dma_alloc_coherent(
+				&adapter->pdev->dev,
+				sizeof(struct Vmxnet3_DriverShared),
+				&adapter->shared_pa, GFP_KERNEL);
 	if (!adapter->shared) {
-		printk(KERN_ERR "Failed to allocate memory for %s\n",
-			pci_name(pdev));
+		dev_err(&pdev->dev, "Failed to allocate memory\n");
 		err = -ENOMEM;
 		goto err_alloc_shared;
 	}
 
 	adapter->num_rx_queues = num_rx_queues;
 	adapter->num_tx_queues = num_tx_queues;
+	adapter->rx_buf_per_pkt = 1;
 
 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
 	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
-	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
-						  &adapter->queue_desc_pa);
+	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
+						&adapter->queue_desc_pa,
+						GFP_KERNEL);
 
 	if (!adapter->tqd_start) {
-		printk(KERN_ERR "Failed to allocate memory for %s\n",
-			pci_name(pdev));
+		dev_err(&pdev->dev, "Failed to allocate memory\n");
 		err = -ENOMEM;
 		goto err_alloc_queue_desc;
 	}
 	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
-							adapter->num_tx_queues);
+							    adapter->num_tx_queues);
 
-	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
+	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
+					      sizeof(struct Vmxnet3_PMConf),
+					      &adapter->pm_conf_pa,
+					      GFP_KERNEL);
 	if (adapter->pm_conf == NULL) {
-		printk(KERN_ERR "Failed to allocate memory for %s\n",
-			pci_name(pdev));
 		err = -ENOMEM;
 		goto err_alloc_pm;
 	}
 
 #ifdef VMXNET3_RSS
 
-	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
+	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
+					       sizeof(struct UPT1_RSSConf),
+					       &adapter->rss_conf_pa,
+					       GFP_KERNEL);
 	if (adapter->rss_conf == NULL) {
-		printk(KERN_ERR "Failed to allocate memory for %s\n",
-			pci_name(pdev));
 		err = -ENOMEM;
 		goto err_alloc_rss;
 	}
@@ -2955,8 +3032,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	if (ver & 1) {
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
 	} else {
-		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
-		       " %s\n", ver, pci_name(pdev));
+		dev_err(&pdev->dev,
+			"Incompatible h/w version (0x%x) for adapter\n", ver);
 		err = -EBUSY;
 		goto err_ver;
 	}
@@ -2965,19 +3042,18 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	if (ver & 1) {
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
 	} else {
-		printk(KERN_ERR "Incompatible upt version (0x%x) for "
-		       "adapter %s\n", ver, pci_name(pdev));
+		dev_err(&pdev->dev,
+			"Incompatible upt version (0x%x) for adapter\n", ver);
 		err = -EBUSY;
 		goto err_ver;
 	}
 
+	SET_NETDEV_DEV(netdev, &pdev->dev);
 	vmxnet3_declare_features(adapter, dma64);
 
-	adapter->dev_number = atomic_read(&devices_found);
-
-	adapter->share_intr = irq_share_mode;
-	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
-	    adapter->num_tx_queues != adapter->num_rx_queues)
+	if (adapter->num_tx_queues == adapter->num_rx_queues)
+		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
+	else
 		adapter->share_intr = VMXNET3_INTR_DONTSHARE;
 
 	vmxnet3_alloc_intr_resources(adapter);
@@ -2986,7 +3062,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	if (adapter->num_rx_queues > 1 &&
 	    adapter->intr.type == VMXNET3_IT_MSIX) {
 		adapter->rss = true;
-		printk(KERN_INFO "RSS is enabled.\n");
+		netdev->hw_features |= NETIF_F_RXHASH;
+		netdev->features |= NETIF_F_RXHASH;
+		dev_dbg(&pdev->dev, "RSS is enabled.\n");
 	} else {
 		adapter->rss = false;
 	}
@@ -3000,6 +3078,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	netdev->watchdog_timeo = 5 * HZ;
 
 	INIT_WORK(&adapter->work, vmxnet3_reset_work);
+	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
 
 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
 		int i;
@@ -3016,18 +3095,15 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
 	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
 
-	SET_NETDEV_DEV(netdev, &pdev->dev);
+	netif_carrier_off(netdev);
 	err = register_netdev(netdev);
 
 	if (err) {
-		printk(KERN_ERR "Failed to register adapter %s\n",
-			pci_name(pdev));
+		dev_err(&pdev->dev, "Failed to register adapter\n");
 		goto err_register;
 	}
 
-	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
 	vmxnet3_check_link(adapter, false);
-	atomic_inc(&devices_found);
 	return 0;
 
 err_register:
@@ -3036,24 +3112,28 @@ err_ver:
 	vmxnet3_free_pci_resources(adapter);
 err_alloc_pci:
 #ifdef VMXNET3_RSS
-	kfree(adapter->rss_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+			  adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss:
 #endif
-	kfree(adapter->pm_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+			  adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm:
-	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
-			    adapter->queue_desc_pa);
+	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+			  adapter->queue_desc_pa);
err_alloc_queue_desc:
-	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
-			    adapter->shared, adapter->shared_pa);
+	dma_free_coherent(&adapter->pdev->dev,
+			  sizeof(struct Vmxnet3_DriverShared),
+			  adapter->shared, adapter->shared_pa);
err_alloc_shared:
-	pci_set_drvdata(pdev, NULL);
+	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
 	free_netdev(netdev);
 	return err;
 }
 
 
-static void __devexit
+static void
 vmxnet3_remove_device(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3068,24 +3148,30 @@ vmxnet3_remove_device(struct pci_dev *pdev)
 	else
 #endif
 		num_rx_queues = 1;
+	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->work);
 
 	unregister_netdev(netdev);
 
 	vmxnet3_free_intr_resources(adapter);
 	vmxnet3_free_pci_resources(adapter);
 #ifdef VMXNET3_RSS
-	kfree(adapter->rss_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+			  adapter->rss_conf, adapter->rss_conf_pa);
 #endif
-	kfree(adapter->pm_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+			  adapter->pm_conf, adapter->pm_conf_pa);
 
 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
 	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
-	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
-			    adapter->queue_desc_pa);
-	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
-			    adapter->shared, adapter->shared_pa);
+	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+			  adapter->queue_desc_pa);
+	dma_free_coherent(&adapter->pdev->dev,
+			  sizeof(struct Vmxnet3_DriverShared),
+			  adapter->shared, adapter->shared_pa);
+	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
 	free_netdev(netdev);
 }
 
@@ -3104,11 +3190,15 @@ vmxnet3_suspend(struct device *device)
 	u8 *arpreq;
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
+	unsigned long flags;
 	int i = 0;
 
 	if (!netif_running(netdev))
 		return 0;
 
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_disable(&adapter->rx_queue[i].napi);
+
 	vmxnet3_disable_all_intrs(adapter);
 	vmxnet3_free_irqs(adapter);
 	vmxnet3_free_intr_resources(adapter);
@@ -3181,11 +3271,13 @@ skip_arp:
 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
 								  *pmConf));
-	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
-								 pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA =
+		cpu_to_le64(adapter->pm_conf_pa);
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	pci_save_state(pdev);
 	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3200,7 +3292,8 @@ skip_arp:
 static int
 vmxnet3_resume(struct device *device)
 {
-	int err;
+	int err, i = 0;
+	unsigned long flags;
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3216,8 +3309,8 @@ vmxnet3_resume(struct device *device)
 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
 								  *pmConf));
-	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
-								 pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA =
+		cpu_to_le64(adapter->pm_conf_pa);
 
 	netif_device_attach(netdev);
 	pci_set_power_state(pdev, PCI_D0);
@@ -3228,10 +3321,14 @@ vmxnet3_resume(struct device *device)
 	pci_enable_wake(pdev, PCI_D0, 0);
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	vmxnet3_alloc_intr_resources(adapter);
 	vmxnet3_request_irqs(adapter);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_queue[i].napi);
 	vmxnet3_enable_all_intrs(adapter);
 
 	return 0;
@@ -3247,7 +3344,7 @@ static struct pci_driver vmxnet3_driver = {
 	.name		= vmxnet3_driver_name,
 	.id_table	= vmxnet3_pciid_table,
 	.probe		= vmxnet3_probe_device,
-	.remove		= __devexit_p(vmxnet3_remove_device),
+	.remove		= vmxnet3_remove_device,
 #ifdef CONFIG_PM
 	.driver.pm	= &vmxnet3_pm_ops,
 #endif
@@ -3257,7 +3354,7 @@ static struct pci_driver vmxnet3_driver = {
 
 static int __init
 vmxnet3_init_module(void)
 {
-	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
+	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
 		VMXNET3_DRIVER_VERSION_REPORT);
 	return pci_register_driver(&vmxnet3_driver);
 }
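
The recurring pattern in this diff is the move from the legacy PCI DMA helpers (pci_alloc_consistent(), virt_to_phys()) to the generic DMA API, which returns the CPU pointer and the device-visible bus address as a pair. Below is a minimal sketch of that idiom outside this driver; the my_* names and the trivial shared structure are illustrative assumptions, not vmxnet3 code.

/*
 * Sketch of the coherent-DMA allocation idiom the patch adopts.
 * The my_* identifiers are hypothetical, not part of vmxnet3.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct my_shared {
	u32 magic;	/* fields the device reads and writes */
};

static int my_alloc_shared(struct pci_dev *pdev, struct my_shared **shared,
			   dma_addr_t *shared_pa)
{
	/*
	 * dma_alloc_coherent() hands back both the kernel virtual address
	 * and the bus address; the device must be programmed with the
	 * latter, never with virt_to_phys() of the former.
	 */
	*shared = dma_alloc_coherent(&pdev->dev, sizeof(**shared),
				     shared_pa, GFP_KERNEL);
	return *shared ? 0 : -ENOMEM;
}

static void my_free_shared(struct pci_dev *pdev, struct my_shared *shared,
			   dma_addr_t shared_pa)
{
	/* Size, CPU pointer and bus address must match the allocation. */
	dma_free_coherent(&pdev->dev, sizeof(*shared), shared, shared_pa);
}

This is also why pm_conf and rss_conf switch from kmalloc() to dma_alloc_coherent(), and why confPA is now filled from adapter->pm_conf_pa rather than virt_to_phys(): with an IOMMU or DMA offset in play, the physical address of kmalloc() memory is not in general a valid bus address for the device.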
