Diffstat (limited to 'drivers/net/vmxnet3/vmxnet3_drv.c')
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 739
1 file changed, 395 insertions(+), 344 deletions(-)

Short notes with illustrative code sketches of the patch's main API conversions follow the diff.
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index de7fc345148..b76f7dcde0d 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -43,11 +43,7 @@ static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = { MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); -static atomic_t devices_found; - -#define VMXNET3_MAX_DEVICES 10 static int enable_mq = 1; -static int irq_share_mode; static void vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); @@ -152,10 +148,9 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) adapter->link_speed = ret >> 16; if (ret & 1) { /* Link is up. */ - printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", - adapter->netdev->name, adapter->link_speed); - if (!netif_carrier_ok(adapter->netdev)) - netif_carrier_on(adapter->netdev); + netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", + adapter->link_speed); + netif_carrier_on(adapter->netdev); if (affectTxQueue) { for (i = 0; i < adapter->num_tx_queues; i++) @@ -163,10 +158,8 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) adapter); } } else { - printk(KERN_INFO "%s: NIC Link is Down\n", - adapter->netdev->name); - if (netif_carrier_ok(adapter->netdev)) - netif_carrier_off(adapter->netdev); + netdev_info(adapter->netdev, "NIC Link is Down\n"); + netif_carrier_off(adapter->netdev); if (affectTxQueue) { for (i = 0; i < adapter->num_tx_queues; i++) @@ -320,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, struct pci_dev *pdev) { if (tbi->map_type == VMXNET3_MAP_SINGLE) - pci_unmap_single(pdev, tbi->dma_addr, tbi->len, + dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, PCI_DMA_TODEVICE); else if (tbi->map_type == VMXNET3_MAP_PAGE) - pci_unmap_page(pdev, tbi->dma_addr, tbi->len, + dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, PCI_DMA_TODEVICE); else BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); @@ -436,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { if (tq->tx_ring.base) { - pci_free_consistent(adapter->pdev, tq->tx_ring.size * - sizeof(struct Vmxnet3_TxDesc), - tq->tx_ring.base, tq->tx_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * + sizeof(struct Vmxnet3_TxDesc), + tq->tx_ring.base, tq->tx_ring.basePA); tq->tx_ring.base = NULL; } if (tq->data_ring.base) { - pci_free_consistent(adapter->pdev, tq->data_ring.size * - sizeof(struct Vmxnet3_TxDataDesc), - tq->data_ring.base, tq->data_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size * + sizeof(struct Vmxnet3_TxDataDesc), + tq->data_ring.base, tq->data_ring.basePA); tq->data_ring.base = NULL; } if (tq->comp_ring.base) { - pci_free_consistent(adapter->pdev, tq->comp_ring.size * - sizeof(struct Vmxnet3_TxCompDesc), - tq->comp_ring.base, tq->comp_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * + sizeof(struct Vmxnet3_TxCompDesc), + tq->comp_ring.base, tq->comp_ring.basePA); tq->comp_ring.base = NULL; } - kfree(tq->buf_info); - tq->buf_info = NULL; + if (tq->buf_info) { + dma_free_coherent(&adapter->pdev->dev, + tq->tx_ring.size * sizeof(tq->buf_info[0]), + tq->buf_info, tq->buf_info_pa); + tq->buf_info = NULL; + } } @@ -503,45 +500,40 @@ static int vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { + size_t sz; + BUG_ON(tq->tx_ring.base || tq->data_ring.base || tq->comp_ring.base || tq->buf_info); - tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size - * 
sizeof(struct Vmxnet3_TxDesc), - &tq->tx_ring.basePA); + tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), + &tq->tx_ring.basePA, GFP_KERNEL); if (!tq->tx_ring.base) { - printk(KERN_ERR "%s: failed to allocate tx ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate tx ring\n"); goto err; } - tq->data_ring.base = pci_alloc_consistent(adapter->pdev, - tq->data_ring.size * - sizeof(struct Vmxnet3_TxDataDesc), - &tq->data_ring.basePA); + tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc), + &tq->data_ring.basePA, GFP_KERNEL); if (!tq->data_ring.base) { - printk(KERN_ERR "%s: failed to allocate data ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate data ring\n"); goto err; } - tq->comp_ring.base = pci_alloc_consistent(adapter->pdev, - tq->comp_ring.size * - sizeof(struct Vmxnet3_TxCompDesc), - &tq->comp_ring.basePA); + tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), + &tq->comp_ring.basePA, GFP_KERNEL); if (!tq->comp_ring.base) { - printk(KERN_ERR "%s: failed to allocate tx comp ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); goto err; } - tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]), - GFP_KERNEL); - if (!tq->buf_info) { - printk(KERN_ERR "%s: failed to allocate tx bufinfo\n", - adapter->netdev->name); + sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); + tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, + &tq->buf_info_pa, GFP_KERNEL); + if (!tq->buf_info) goto err; - } return 0; @@ -583,16 +575,16 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { if (rbi->skb == NULL) { - rbi->skb = dev_alloc_skb(rbi->len + - NET_IP_ALIGN); + rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, + rbi->len, + GFP_KERNEL); if (unlikely(rbi->skb == NULL)) { rq->stats.rx_buf_alloc_failure++; break; } - rbi->skb->dev = adapter->netdev; - skb_reserve(rbi->skb, NET_IP_ALIGN); - rbi->dma_addr = pci_map_single(adapter->pdev, + rbi->dma_addr = dma_map_single( + &adapter->pdev->dev, rbi->skb->data, rbi->len, PCI_DMA_FROMDEVICE); } else { @@ -609,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, rq->stats.rx_buf_alloc_failure++; break; } - rbi->dma_addr = pci_map_page(adapter->pdev, + rbi->dma_addr = dma_map_page( + &adapter->pdev->dev, rbi->page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); } else { @@ -632,12 +625,10 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, num_allocated++; vmxnet3_cmd_ring_adv_next2fill(ring); } - rq->uncommitted[ring_idx] += num_allocated; - dev_dbg(&adapter->netdev->dev, - "alloc_rx_buf: %d allocated, next2fill %u, next2comp " - "%u, uncommited %u\n", num_allocated, ring->next2fill, - ring->next2comp, rq->uncommitted[ring_idx]); + netdev_dbg(adapter->netdev, + "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n", + num_allocated, ring->next2fill, ring->next2comp); /* so that the device can distinguish a full ring and an empty ring */ BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); @@ -694,7 +685,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi = tq->buf_info + tq->tx_ring.next2fill; tbi->map_type = VMXNET3_MAP_NONE; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: 0x%Lx 
0x%x 0x%x\n", tq->tx_ring.next2fill, le64_to_cpu(ctx->sop_txd->txd.addr), @@ -721,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi = tq->buf_info + tq->tx_ring.next2fill; tbi->map_type = VMXNET3_MAP_SINGLE; - tbi->dma_addr = pci_map_single(adapter->pdev, + tbi->dma_addr = dma_map_single(&adapter->pdev->dev, skb->data + buf_offset, buf_size, PCI_DMA_TODEVICE); @@ -734,7 +725,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, gdesc->dword[2] = cpu_to_le32(dw2); gdesc->dword[3] = 0; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: 0x%Lx 0x%x 0x%x\n", tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); @@ -747,28 +738,43 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + u32 buf_size; - tbi = tq->buf_info + tq->tx_ring.next2fill; - tbi->map_type = VMXNET3_MAP_PAGE; - tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, - 0, skb_frag_size(frag), - DMA_TO_DEVICE); + buf_offset = 0; + len = skb_frag_size(frag); + while (len) { + tbi = tq->buf_info + tq->tx_ring.next2fill; + if (len < VMXNET3_MAX_TX_BUF_SIZE) { + buf_size = len; + dw2 |= len; + } else { + buf_size = VMXNET3_MAX_TX_BUF_SIZE; + /* spec says that for TxDesc.len, 0 == 2^14 */ + } + tbi->map_type = VMXNET3_MAP_PAGE; + tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, + buf_offset, buf_size, + DMA_TO_DEVICE); - tbi->len = skb_frag_size(frag); + tbi->len = buf_size; - gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; - BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); + gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; + BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); - gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); - gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag)); - gdesc->dword[3] = 0; + gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); + gdesc->dword[2] = cpu_to_le32(dw2); + gdesc->dword[3] = 0; - dev_dbg(&adapter->netdev->dev, - "txd[%u]: 0x%llu %u %u\n", - tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), - le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); - vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); - dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; + netdev_dbg(adapter->netdev, + "txd[%u]: 0x%llu %u %u\n", + tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), + le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); + vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); + dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; + + len -= buf_size; + buf_offset += buf_size; + } } ctx->eop_txd = gdesc; @@ -816,35 +822,27 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, if (ctx->mss) { /* TSO */ ctx->eth_ip_hdr_size = skb_transport_offset(skb); - ctx->l4_hdr_size = ((struct tcphdr *) - skb_transport_header(skb))->doff * 4; + ctx->l4_hdr_size = tcp_hdrlen(skb); ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; } else { if (skb->ip_summed == CHECKSUM_PARTIAL) { ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); if (ctx->ipv4) { - struct iphdr *iph = (struct iphdr *) - skb_network_header(skb); + const struct iphdr *iph = ip_hdr(skb); + if (iph->protocol == IPPROTO_TCP) - ctx->l4_hdr_size = ((struct tcphdr *) - skb_transport_header(skb))->doff * 4; + ctx->l4_hdr_size = tcp_hdrlen(skb); else if (iph->protocol == IPPROTO_UDP) - /* - * Use tcp header size so that bytes to - * be copied are more than required by - * the device. 
- */ - ctx->l4_hdr_size = - sizeof(struct tcphdr); + ctx->l4_hdr_size = sizeof(struct udphdr); else ctx->l4_hdr_size = 0; } else { /* for simplicity, don't copy L4 headers */ ctx->l4_hdr_size = 0; } - ctx->copy_size = ctx->eth_ip_hdr_size + - ctx->l4_hdr_size; + ctx->copy_size = min(ctx->eth_ip_hdr_size + + ctx->l4_hdr_size, skb->len); } else { ctx->eth_ip_hdr_size = 0; ctx->l4_hdr_size = 0; @@ -867,7 +865,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, tdd = tq->data_ring.base + tq->tx_ring.next2fill; memcpy(tdd->data, skb->data, ctx->copy_size); - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "copy %u bytes to dataRing[%u]\n", ctx->copy_size, tq->tx_ring.next2fill); return 1; @@ -881,19 +879,34 @@ static void vmxnet3_prepare_tso(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx) { - struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb); + struct tcphdr *tcph = tcp_hdr(skb); + if (ctx->ipv4) { - struct iphdr *iph = (struct iphdr *)skb_network_header(skb); + struct iphdr *iph = ip_hdr(skb); + iph->check = 0; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb); + struct ipv6hdr *iph = ipv6_hdr(skb); + tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, IPPROTO_TCP, 0); } } +static int txd_estimate(const struct sk_buff *skb) +{ + int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + + count += VMXNET3_TXD_NEEDED(skb_frag_size(frag)); + } + return count; +} /* * Transmits a pkt thru a given tq @@ -922,9 +935,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, union Vmxnet3_GenericDesc tempTxDesc; #endif - /* conservatively estimate # of descriptors to use */ - count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + - skb_shinfo(skb)->nr_frags + 1; + count = txd_estimate(skb); ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); @@ -960,7 +971,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { tq->stats.tx_ring_full++; - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "tx queue stopped on %s, next2comp %u" " next2fill %u\n", adapter->netdev->name, tq->tx_ring.next2comp, tq->tx_ring.next2fill); @@ -1043,9 +1054,9 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, (struct Vmxnet3_TxDesc *)ctx.sop_txd); gdesc = ctx.sop_txd; #endif - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", - (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - + (u32)(ctx.sop_txd - tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); @@ -1067,7 +1078,7 @@ unlock_drop_pkt: spin_unlock_irqrestore(&tq->tx_lock, flags); drop_pkt: tq->stats.drop_total++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1077,10 +1088,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - BUG_ON(skb->queue_mapping > adapter->num_tx_queues); - return vmxnet3_tq_xmit(skb, - &adapter->tx_queue[skb->queue_mapping], - adapter, netdev); + BUG_ON(skb->queue_mapping > adapter->num_tx_queues); + return vmxnet3_tq_xmit(skb, + &adapter->tx_queue[skb->queue_mapping], + adapter, netdev); } @@ -1196,7 +1207,7 @@ vmxnet3_rq_rx_complete(struct 
vmxnet3_rx_queue *rq, if (unlikely(rcd->len == 0)) { /* Pretend the rx buffer is skipped. */ BUG_ON(!(rcd->sop && rcd->eop)); - dev_dbg(&adapter->netdev->dev, + netdev_dbg(adapter->netdev, "rxRing[%u][%u] 0 length\n", ring_idx, idx); goto rcd_done; @@ -1204,7 +1215,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = false; ctx->skb = rbi->skb; - new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN); + new_skb = netdev_alloc_skb_ip_align(adapter->netdev, + rbi->len); if (new_skb == NULL) { /* Skb allocation failed, do not handover this * skb to stack. Reuse it. Drop the existing pkt @@ -1216,18 +1228,24 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, goto rcd_done; } - pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, + dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, + rbi->len, PCI_DMA_FROMDEVICE); +#ifdef VMXNET3_RSS + if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && + (adapter->netdev->features & NETIF_F_RXHASH)) + skb_set_hash(ctx->skb, + le32_to_cpu(rcd->rssHash), + PKT_HASH_TYPE_L3); +#endif skb_put(ctx->skb, rcd->len); /* Immediate refill */ - new_skb->dev = adapter->netdev; - skb_reserve(new_skb, NET_IP_ALIGN); rbi->skb = new_skb; - rbi->dma_addr = pci_map_single(adapter->pdev, - rbi->skb->data, rbi->len, - PCI_DMA_FROMDEVICE); + rbi->dma_addr = dma_map_single(&adapter->pdev->dev, + rbi->skb->data, rbi->len, + PCI_DMA_FROMDEVICE); rxd->addr = cpu_to_le64(rbi->dma_addr); rxd->len = rbi->len; @@ -1259,7 +1277,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, } if (rcd->len) { - pci_unmap_page(adapter->pdev, + dma_unmap_page(&adapter->pdev->dev, rbi->dma_addr, rbi->len, PCI_DMA_FROMDEVICE); @@ -1268,7 +1286,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, /* Immediate refill */ rbi->page = new_page; - rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page, + rbi->dma_addr = dma_map_page(&adapter->pdev->dev, + rbi->page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); rxd->addr = cpu_to_le64(rbi->dma_addr); @@ -1285,7 +1304,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skb->protocol = eth_type_trans(skb, adapter->netdev); if (unlikely(rcd->ts)) - __vlan_hwaccel_put_tag(skb, rcd->tci); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci); if (adapter->netdev->features & NETIF_F_LRO) netif_receive_skb(skb); @@ -1314,14 +1333,13 @@ rcd_done: /* if needed, update the register */ if (unlikely(rq->shared->updateRxProd)) { VMXNET3_WRITE_BAR0_REG(adapter, - rxprod_reg[ring_idx] + rq->qid * 8, - ring->next2fill); - rq->uncommitted[ring_idx] = 0; + rxprod_reg[ring_idx] + rq->qid * 8, + ring->next2fill); } vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); vmxnet3_getRxComp(rcd, - &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); + &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); } return num_rxd; @@ -1345,13 +1363,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && rq->buf_info[ring_idx][i].skb) { - pci_unmap_single(adapter->pdev, rxd->addr, + dma_unmap_single(&adapter->pdev->dev, rxd->addr, rxd->len, PCI_DMA_FROMDEVICE); dev_kfree_skb(rq->buf_info[ring_idx][i].skb); rq->buf_info[ring_idx][i].skb = NULL; } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && rq->buf_info[ring_idx][i].page) { - pci_unmap_page(adapter->pdev, rxd->addr, + dma_unmap_page(&adapter->pdev->dev, rxd->addr, rxd->len, PCI_DMA_FROMDEVICE); put_page(rq->buf_info[ring_idx][i].page); rq->buf_info[ring_idx][i].page = NULL; @@ -1361,7 +1379,6 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, 
rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; rq->rx_ring[ring_idx].next2fill = rq->rx_ring[ring_idx].next2comp = 0; - rq->uncommitted[ring_idx] = 0; } rq->comp_ring.gen = VMXNET3_INIT_GEN; @@ -1379,8 +1396,8 @@ vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter) } -void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, - struct vmxnet3_adapter *adapter) +static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter) { int i; int j; @@ -1394,25 +1411,31 @@ void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, } - kfree(rq->buf_info[0]); - for (i = 0; i < 2; i++) { if (rq->rx_ring[i].base) { - pci_free_consistent(adapter->pdev, rq->rx_ring[i].size - * sizeof(struct Vmxnet3_RxDesc), - rq->rx_ring[i].base, - rq->rx_ring[i].basePA); + dma_free_coherent(&adapter->pdev->dev, + rq->rx_ring[i].size + * sizeof(struct Vmxnet3_RxDesc), + rq->rx_ring[i].base, + rq->rx_ring[i].basePA); rq->rx_ring[i].base = NULL; } rq->buf_info[i] = NULL; } if (rq->comp_ring.base) { - pci_free_consistent(adapter->pdev, rq->comp_ring.size * - sizeof(struct Vmxnet3_RxCompDesc), - rq->comp_ring.base, rq->comp_ring.basePA); + dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size + * sizeof(struct Vmxnet3_RxCompDesc), + rq->comp_ring.base, rq->comp_ring.basePA); rq->comp_ring.base = NULL; } + + if (rq->buf_info[0]) { + size_t sz = sizeof(struct vmxnet3_rx_buf_info) * + (rq->rx_ring[0].size + rq->rx_ring[1].size); + dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], + rq->buf_info_pa); + } } @@ -1442,7 +1465,6 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, /* reset internal state and allocate buffers for both rings */ for (i = 0; i < 2; i++) { rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; - rq->uncommitted[i] = 0; memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc)); @@ -1498,32 +1520,33 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) for (i = 0; i < 2; i++) { sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); - rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz, - &rq->rx_ring[i].basePA); + rq->rx_ring[i].base = dma_alloc_coherent( + &adapter->pdev->dev, sz, + &rq->rx_ring[i].basePA, + GFP_KERNEL); if (!rq->rx_ring[i].base) { - printk(KERN_ERR "%s: failed to allocate rx ring %d\n", - adapter->netdev->name, i); + netdev_err(adapter->netdev, + "failed to allocate rx ring %d\n", i); goto err; } } sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); - rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz, - &rq->comp_ring.basePA); + rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, + &rq->comp_ring.basePA, + GFP_KERNEL); if (!rq->comp_ring.base) { - printk(KERN_ERR "%s: failed to allocate rx comp ring\n", - adapter->netdev->name); + netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); goto err; } sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + rq->rx_ring[1].size); - bi = kzalloc(sz, GFP_KERNEL); - if (!bi) { - printk(KERN_ERR "%s: failed to allocate rx bufinfo\n", - adapter->netdev->name); + bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, + GFP_KERNEL); + if (!bi) goto err; - } + rq->buf_info[0] = bi; rq->buf_info[1] = bi + rq->rx_ring[0].size; @@ -1739,11 +1762,20 @@ vmxnet3_netpoll(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) - vmxnet3_disable_all_intrs(adapter); - - vmxnet3_do_poll(adapter, 
adapter->rx_queue[0].rx_ring[0].size); - vmxnet3_enable_all_intrs(adapter); + switch (adapter->intr.type) { +#ifdef CONFIG_PCI_MSI + case VMXNET3_IT_MSIX: { + int i; + for (i = 0; i < adapter->num_rx_queues; i++) + vmxnet3_msix_rx(0, &adapter->rx_queue[i]); + break; + } +#endif + case VMXNET3_IT_MSI: + default: + vmxnet3_intr(0, adapter->netdev); + break; + } } #endif /* CONFIG_NET_POLL_CONTROLLER */ @@ -1806,9 +1838,10 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) adapter->rx_queue[i].name, &(adapter->rx_queue[i])); if (err) { - printk(KERN_ERR "Failed to request irq for MSIX" - ", %s, error %d\n", - adapter->rx_queue[i].name, err); + netdev_err(adapter->netdev, + "Failed to request irq for MSIX, " + "%s, error %d\n", + adapter->rx_queue[i].name, err); return err; } @@ -1837,8 +1870,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) #endif intr->num_intrs = vector + 1; if (err) { - printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" - ":%d\n", adapter->netdev->name, intr->type, err); + netdev_err(adapter->netdev, + "Failed to request irq (intr type:%d), error %d\n", + intr->type, err); } else { /* Number of rx queues will not change after this */ for (i = 0; i < adapter->num_rx_queues; i++) { @@ -1859,9 +1893,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) adapter->rx_queue[0].comp_ring.intr_idx = 0; } - printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " - "allocated\n", adapter->netdev->name, intr->type, - intr->mask_mode, intr->num_intrs); + netdev_info(adapter->netdev, + "intr type %u, mode %u, %u vectors allocated\n", + intr->type, intr->mask_mode, intr->num_intrs); } return err; @@ -1907,7 +1941,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) free_irq(adapter->pdev->irq, adapter->netdev); break; default: - BUG_ON(true); + BUG(); } } @@ -1927,7 +1961,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) static int -vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -1949,7 +1983,7 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) static int -vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -2001,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev) struct Vmxnet3_RxFilterConf *rxConf = &adapter->shared->devRead.rxFilterConf; u8 *new_table = NULL; + dma_addr_t new_table_pa = 0; u32 new_mode = VMXNET3_RXM_UCAST; if (netdev->flags & IFF_PROMISC) { @@ -2024,11 +2059,15 @@ vmxnet3_set_mc(struct net_device *netdev) new_mode |= VMXNET3_RXM_MCAST; rxConf->mfTableLen = cpu_to_le16( netdev_mc_count(netdev) * ETH_ALEN); - rxConf->mfTablePA = cpu_to_le64(virt_to_phys( - new_table)); + new_table_pa = dma_map_single( + &adapter->pdev->dev, + new_table, + rxConf->mfTableLen, + PCI_DMA_TODEVICE); + rxConf->mfTablePA = cpu_to_le64(new_table_pa); } else { - printk(KERN_INFO "%s: failed to copy mcast list" - ", setting ALL_MULTI\n", netdev->name); + netdev_info(netdev, "failed to copy mcast list" + ", setting ALL_MULTI\n"); new_mode |= VMXNET3_RXM_ALL_MULTI; } } @@ -2052,7 +2091,11 @@ vmxnet3_set_mc(struct net_device *netdev) VMXNET3_CMD_UPDATE_MAC_FILTERS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); - kfree(new_table); + if (new_table) { + dma_unmap_single(&adapter->pdev->dev, new_table_pa, + rxConf->mfTableLen, PCI_DMA_TODEVICE); + 
kfree(new_table); + } } void @@ -2092,7 +2135,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); - devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter)); + devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa); devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); /* set up feature flags */ @@ -2103,7 +2146,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) devRead->misc.uptFeatures |= UPT1_F_LRO; devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); } - if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) devRead->misc.uptFeatures |= UPT1_F_RXVLAN; devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); @@ -2121,7 +2164,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); - tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info)); + tqc->ddPA = cpu_to_le64(tq->buf_info_pa); tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); @@ -2139,8 +2182,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); - rqc->ddPA = cpu_to_le64(virt_to_phys( - rq->buf_info)); + rqc->ddPA = cpu_to_le64(rq->buf_info_pa); rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); @@ -2156,6 +2198,14 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) if (adapter->rss) { struct UPT1_RSSConf *rssConf = adapter->rss_conf; + static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = { + 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac, + 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28, + 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70, + 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3, + 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9, + }; + devRead->misc.uptFeatures |= UPT1_F_RSS; devRead->misc.numRxQueues = adapter->num_rx_queues; rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | @@ -2165,14 +2215,16 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; - get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize); + memcpy(rssConf->hashKey, rss_key, sizeof(rss_key)); + for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = ethtool_rxfh_indir_default( i, adapter->num_rx_queues); devRead->rssConfDesc.confVer = 1; - devRead->rssConfDesc.confLen = sizeof(*rssConf); - devRead->rssConfDesc.confPA = virt_to_phys(rssConf); + devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf)); + devRead->rssConfDesc.confPA = + cpu_to_le64(adapter->rss_conf_pa); } #endif /* VMXNET3_RSS */ @@ -2203,7 +2255,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) u32 ret; unsigned long flags; - dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," + netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," " ring sizes %u %u %u\n", adapter->netdev->name, 
adapter->skb_buf_size, adapter->rx_buf_per_pkt, adapter->tx_queue[0].tx_ring.size, @@ -2213,15 +2265,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) vmxnet3_tq_init_all(adapter); err = vmxnet3_rq_init_all(adapter); if (err) { - printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", - adapter->netdev->name, err); + netdev_err(adapter->netdev, + "Failed to init rx queue error %d\n", err); goto rq_err; } err = vmxnet3_request_irqs(adapter); if (err) { - printk(KERN_ERR "Failed to setup irq for %s: error %d\n", - adapter->netdev->name, err); + netdev_err(adapter->netdev, + "Failed to setup irq for error %d\n", err); goto irq_err; } @@ -2238,8 +2290,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) spin_unlock_irqrestore(&adapter->cmd_lock, flags); if (ret != 0) { - printk(KERN_ERR "Failed to activate dev %s: error %u\n", - adapter->netdev->name, ret); + netdev_err(adapter->netdev, + "Failed to activate dev: error %u\n", ret); err = -EINVAL; goto activate_err; } @@ -2354,23 +2406,22 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) err = pci_enable_device(pdev); if (err) { - printk(KERN_ERR "Failed to enable adapter %s: error %d\n", - pci_name(pdev), err); + dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err); return err; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { - printk(KERN_ERR "pci_set_consistent_dma_mask failed " - "for adapter %s\n", pci_name(pdev)); + dev_err(&pdev->dev, + "pci_set_consistent_dma_mask failed\n"); err = -EIO; goto err_set_mask; } *dma64 = true; } else { if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { - printk(KERN_ERR "pci_set_dma_mask failed for adapter " - "%s\n", pci_name(pdev)); + dev_err(&pdev->dev, + "pci_set_dma_mask failed\n"); err = -EIO; goto err_set_mask; } @@ -2380,8 +2431,8 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) err = pci_request_selected_regions(pdev, (1 << 2) - 1, vmxnet3_driver_name); if (err) { - printk(KERN_ERR "Failed to request region for adapter %s: " - "error %d\n", pci_name(pdev), err); + dev_err(&pdev->dev, + "Failed to request region for adapter: error %d\n", err); goto err_set_mask; } @@ -2391,8 +2442,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) mmio_len = pci_resource_len(pdev, 0); adapter->hw_addr0 = ioremap(mmio_start, mmio_len); if (!adapter->hw_addr0) { - printk(KERN_ERR "Failed to map bar0 for adapter %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to map bar0\n"); err = -EIO; goto err_ioremap; } @@ -2401,8 +2451,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) mmio_len = pci_resource_len(pdev, 1); adapter->hw_addr1 = ioremap(mmio_start, mmio_len); if (!adapter->hw_addr1) { - printk(KERN_ERR "Failed to map bar1 for adapter %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to map bar1\n"); err = -EIO; goto err_bar1; } @@ -2509,12 +2558,14 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, err = vmxnet3_rq_create(rq, adapter); if (err) { if (i == 0) { - printk(KERN_ERR "Could not allocate any rx" - "queues. Aborting.\n"); + netdev_err(adapter->netdev, + "Could not allocate any rx queues. 
" + "Aborting.\n"); goto queue_err; } else { - printk(KERN_INFO "Number of rx queues changed " - "to : %d.\n", i); + netdev_info(adapter->netdev, + "Number of rx queues changed " + "to : %d.\n", i); adapter->num_rx_queues = i; err = 0; break; @@ -2538,8 +2589,8 @@ vmxnet3_open(struct net_device *netdev) for (i = 0; i < adapter->num_tx_queues; i++) spin_lock_init(&adapter->tx_queue[i].tx_lock); - err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, - VMXNET3_DEF_RX_RING_SIZE, + err = vmxnet3_create_queues(adapter, adapter->tx_ring_size, + adapter->rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); if (err) goto queue_err; @@ -2627,15 +2678,17 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) vmxnet3_adjust_rx_ring_size(adapter); err = vmxnet3_rq_create_all(adapter); if (err) { - printk(KERN_ERR "%s: failed to re-create rx queues," - " error %d. Closing it.\n", netdev->name, err); + netdev_err(netdev, + "failed to re-create rx queues, " + " error %d. Closing it.\n", err); goto out; } err = vmxnet3_activate_dev(adapter); if (err) { - printk(KERN_ERR "%s: failed to re-activate, error %d. " - "Closing it\n", netdev->name, err); + netdev_err(netdev, + "failed to re-activate, error %d. " + "Closing it\n", err); goto out; } } @@ -2655,18 +2708,15 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) struct net_device *netdev = adapter->netdev; netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO; if (dma64) netdev->hw_features |= NETIF_F_HIGHDMA; netdev->vlan_features = netdev->hw_features & - ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); - netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER; - - netdev_info(adapter->netdev, - "features: sg csum vlan jf tso tsoIPv6 lro%s\n", - dma64 ? " highDMA" : ""); + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX); + netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; } @@ -2688,46 +2738,35 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) /* * Enable MSIx vectors. * Returns : - * 0 on successful enabling of required vectors, * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required - * could be enabled. - * number of vectors which can be enabled otherwise (this number is smaller + * were enabled. + * number of vectors which were enabled otherwise (this number is greater * than VMXNET3_LINUX_MIN_MSIX_VECT) */ static int -vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, - int vectors) -{ - int err = 0, vector_threshold; - vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT; - - while (vectors >= vector_threshold) { - err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, - vectors); - if (!err) { - adapter->intr.num_intrs = vectors; - return 0; - } else if (err < 0) { - printk(KERN_ERR "Failed to enable MSI-X for %s, error" - " %d\n", adapter->netdev->name, err); - vectors = 0; - } else if (err < vector_threshold) { - break; - } else { - /* If fails to enable required number of MSI-x vectors - * try enabling minimum number of vectors required. 
- */ - vectors = vector_threshold; - printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" - " %d instead\n", vectors, adapter->netdev->name, - vector_threshold); - } +vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec) +{ + int ret = pci_enable_msix_range(adapter->pdev, + adapter->intr.msix_entries, nvec, nvec); + + if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) { + dev_err(&adapter->netdev->dev, + "Failed to enable %d MSI-X, trying %d\n", + nvec, VMXNET3_LINUX_MIN_MSIX_VECT); + + ret = pci_enable_msix_range(adapter->pdev, + adapter->intr.msix_entries, + VMXNET3_LINUX_MIN_MSIX_VECT, + VMXNET3_LINUX_MIN_MSIX_VECT); } - printk(KERN_INFO "Number of MSI-X interrupts which can be allocatedi" - " are lower than min threshold required.\n"); - return err; + if (ret < 0) { + dev_err(&adapter->netdev->dev, + "Failed to enable MSI-X, error: %d\n", ret); + } + + return ret; } @@ -2754,54 +2793,50 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) #ifdef CONFIG_PCI_MSI if (adapter->intr.type == VMXNET3_IT_MSIX) { - int vector, err = 0; - - adapter->intr.num_intrs = (adapter->share_intr == - VMXNET3_INTR_TXSHARE) ? 1 : - adapter->num_tx_queues; - adapter->intr.num_intrs += (adapter->share_intr == - VMXNET3_INTR_BUDDYSHARE) ? 0 : - adapter->num_rx_queues; - adapter->intr.num_intrs += 1; /* for link event */ - - adapter->intr.num_intrs = (adapter->intr.num_intrs > - VMXNET3_LINUX_MIN_MSIX_VECT - ? adapter->intr.num_intrs : - VMXNET3_LINUX_MIN_MSIX_VECT); - - for (vector = 0; vector < adapter->intr.num_intrs; vector++) - adapter->intr.msix_entries[vector].entry = vector; - - err = vmxnet3_acquire_msix_vectors(adapter, - adapter->intr.num_intrs); + int i, nvec; + + nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? + 1 : adapter->num_tx_queues; + nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ? + 0 : adapter->num_rx_queues; + nvec += 1; /* for link event */ + nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ? + nvec : VMXNET3_LINUX_MIN_MSIX_VECT; + + for (i = 0; i < nvec; i++) + adapter->intr.msix_entries[i].entry = i; + + nvec = vmxnet3_acquire_msix_vectors(adapter, nvec); + if (nvec < 0) + goto msix_err; + /* If we cannot allocate one MSIx vector per queue * then limit the number of rx queues to 1 */ - if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { + if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) { if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE || adapter->num_rx_queues != 1) { adapter->share_intr = VMXNET3_INTR_TXSHARE; - printk(KERN_ERR "Number of rx queues : 1\n"); + netdev_err(adapter->netdev, + "Number of rx queues : 1\n"); adapter->num_rx_queues = 1; - adapter->intr.num_intrs = - VMXNET3_LINUX_MIN_MSIX_VECT; } - return; } - if (!err) - return; + adapter->intr.num_intrs = nvec; + return; + +msix_err: /* If we cannot allocate MSIx vectors use only one rx queue */ - printk(KERN_INFO "Failed to enable MSI-X for %s, error %d." - "#rx queues : 1, try MSI\n", adapter->netdev->name, err); + dev_info(&adapter->pdev->dev, + "Failed to enable MSI-X, error %d. 
" + "Limiting #rx queues to 1, try MSI.\n", nvec); adapter->intr.type = VMXNET3_IT_MSI; } if (adapter->intr.type == VMXNET3_IT_MSI) { - int err; - err = pci_enable_msi(adapter->pdev); - if (!err) { + if (!pci_enable_msi(adapter->pdev)) { adapter->num_rx_queues = 1; adapter->intr.num_intrs = 1; return; @@ -2810,7 +2845,8 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) #endif /* CONFIG_PCI_MSI */ adapter->num_rx_queues = 1; - printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n"); + dev_info(&adapter->netdev->dev, + "Using INTx interrupt, #Rx queues: 1.\n"); adapter->intr.type = VMXNET3_IT_INTX; /* INT-X related setting */ @@ -2836,7 +2872,7 @@ vmxnet3_tx_timeout(struct net_device *netdev) struct vmxnet3_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); + netdev_err(adapter->netdev, "tx hang\n"); schedule_work(&adapter->work); netif_wake_queue(adapter->netdev); } @@ -2856,12 +2892,12 @@ vmxnet3_reset_work(struct work_struct *data) /* if the device is closed, we must leave it alone */ rtnl_lock(); if (netif_running(adapter->netdev)) { - printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); + netdev_notice(adapter->netdev, "resetting\n"); vmxnet3_quiesce_dev(adapter); vmxnet3_reset_dev(adapter); vmxnet3_activate_dev(adapter); } else { - printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); + netdev_info(adapter->netdev, "already closed\n"); } rtnl_unlock(); @@ -2869,7 +2905,7 @@ vmxnet3_reset_work(struct work_struct *data) } -static int __devinit +static int vmxnet3_probe_device(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -2920,62 +2956,69 @@ vmxnet3_probe_device(struct pci_dev *pdev, num_tx_queues = rounddown_pow_of_two(num_tx_queues); netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter), max(num_tx_queues, num_rx_queues)); - printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n", - num_tx_queues, num_rx_queues); + dev_info(&pdev->dev, + "# of Tx queues : %d, # of Rx queues : %d\n", + num_tx_queues, num_rx_queues); - if (!netdev) { - printk(KERN_ERR "Failed to alloc ethernet device for adapter " - "%s\n", pci_name(pdev)); + if (!netdev) return -ENOMEM; - } pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; + adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; + adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; + spin_lock_init(&adapter->cmd_lock); - adapter->shared = pci_alloc_consistent(adapter->pdev, - sizeof(struct Vmxnet3_DriverShared), - &adapter->shared_pa); + adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, + sizeof(struct vmxnet3_adapter), + PCI_DMA_TODEVICE); + adapter->shared = dma_alloc_coherent( + &adapter->pdev->dev, + sizeof(struct Vmxnet3_DriverShared), + &adapter->shared_pa, GFP_KERNEL); if (!adapter->shared) { - printk(KERN_ERR "Failed to allocate memory for %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to allocate memory\n"); err = -ENOMEM; goto err_alloc_shared; } adapter->num_rx_queues = num_rx_queues; adapter->num_tx_queues = num_tx_queues; + adapter->rx_buf_per_pkt = 1; size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; - adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size, - &adapter->queue_desc_pa); + adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, + &adapter->queue_desc_pa, + GFP_KERNEL); if (!adapter->tqd_start) { - 
printk(KERN_ERR "Failed to allocate memory for %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to allocate memory\n"); err = -ENOMEM; goto err_alloc_queue_desc; } adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + - adapter->num_tx_queues); + adapter->num_tx_queues); - adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); + adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_PMConf), + &adapter->pm_conf_pa, + GFP_KERNEL); if (adapter->pm_conf == NULL) { - printk(KERN_ERR "Failed to allocate memory for %s\n", - pci_name(pdev)); err = -ENOMEM; goto err_alloc_pm; } #ifdef VMXNET3_RSS - adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL); + adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(struct UPT1_RSSConf), + &adapter->rss_conf_pa, + GFP_KERNEL); if (adapter->rss_conf == NULL) { - printk(KERN_ERR "Failed to allocate memory for %s\n", - pci_name(pdev)); err = -ENOMEM; goto err_alloc_rss; } @@ -2989,8 +3032,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, if (ver & 1) { VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); } else { - printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter" - " %s\n", ver, pci_name(pdev)); + dev_err(&pdev->dev, + "Incompatible h/w version (0x%x) for adapter\n", ver); err = -EBUSY; goto err_ver; } @@ -2999,8 +3042,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, if (ver & 1) { VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); } else { - printk(KERN_ERR "Incompatible upt version (0x%x) for " - "adapter %s\n", ver, pci_name(pdev)); + dev_err(&pdev->dev, + "Incompatible upt version (0x%x) for adapter\n", ver); err = -EBUSY; goto err_ver; } @@ -3008,11 +3051,9 @@ vmxnet3_probe_device(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); vmxnet3_declare_features(adapter, dma64); - adapter->dev_number = atomic_read(&devices_found); - - adapter->share_intr = irq_share_mode; - if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE && - adapter->num_tx_queues != adapter->num_rx_queues) + if (adapter->num_tx_queues == adapter->num_rx_queues) + adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; + else adapter->share_intr = VMXNET3_INTR_DONTSHARE; vmxnet3_alloc_intr_resources(adapter); @@ -3021,7 +3062,9 @@ vmxnet3_probe_device(struct pci_dev *pdev, if (adapter->num_rx_queues > 1 && adapter->intr.type == VMXNET3_IT_MSIX) { adapter->rss = true; - printk(KERN_INFO "RSS is enabled.\n"); + netdev->hw_features |= NETIF_F_RXHASH; + netdev->features |= NETIF_F_RXHASH; + dev_dbg(&pdev->dev, "RSS is enabled.\n"); } else { adapter->rss = false; } @@ -3035,6 +3078,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, netdev->watchdog_timeo = 5 * HZ; INIT_WORK(&adapter->work, vmxnet3_reset_work); + set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); if (adapter->intr.type == VMXNET3_IT_MSIX) { int i; @@ -3051,17 +3095,15 @@ vmxnet3_probe_device(struct pci_dev *pdev, netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); + netif_carrier_off(netdev); err = register_netdev(netdev); if (err) { - printk(KERN_ERR "Failed to register adapter %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Failed to register adapter\n"); goto err_register; } - set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); vmxnet3_check_link(adapter, false); - atomic_inc(&devices_found); return 0; err_register: @@ -3070,24 +3112,28 @@ err_ver: vmxnet3_free_pci_resources(adapter); err_alloc_pci: #ifdef VMXNET3_RSS - 
kfree(adapter->rss_conf); + dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), + adapter->rss_conf, adapter->rss_conf_pa); err_alloc_rss: #endif - kfree(adapter->pm_conf); + dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), + adapter->pm_conf, adapter->pm_conf_pa); err_alloc_pm: - pci_free_consistent(adapter->pdev, size, adapter->tqd_start, - adapter->queue_desc_pa); + dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, + adapter->queue_desc_pa); err_alloc_queue_desc: - pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), - adapter->shared, adapter->shared_pa); + dma_free_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_DriverShared), + adapter->shared, adapter->shared_pa); err_alloc_shared: - pci_set_drvdata(pdev, NULL); + dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, + sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); free_netdev(netdev); return err; } -static void __devexit +static void vmxnet3_remove_device(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -3111,16 +3157,21 @@ vmxnet3_remove_device(struct pci_dev *pdev) vmxnet3_free_intr_resources(adapter); vmxnet3_free_pci_resources(adapter); #ifdef VMXNET3_RSS - kfree(adapter->rss_conf); + dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), + adapter->rss_conf, adapter->rss_conf_pa); #endif - kfree(adapter->pm_conf); + dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), + adapter->pm_conf, adapter->pm_conf_pa); size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues; - pci_free_consistent(adapter->pdev, size, adapter->tqd_start, - adapter->queue_desc_pa); - pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), - adapter->shared, adapter->shared_pa); + dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, + adapter->queue_desc_pa); + dma_free_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_DriverShared), + adapter->shared, adapter->shared_pa); + dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, + sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); free_netdev(netdev); } @@ -3220,8 +3271,8 @@ skip_arp: adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( *pmConf)); - adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( - pmConf)); + adapter->shared->devRead.pmConfDesc.confPA = + cpu_to_le64(adapter->pm_conf_pa); spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, @@ -3258,8 +3309,8 @@ vmxnet3_resume(struct device *device) adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( *pmConf)); - adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( - pmConf)); + adapter->shared->devRead.pmConfDesc.confPA = + cpu_to_le64(adapter->pm_conf_pa); netif_device_attach(netdev); pci_set_power_state(pdev, PCI_D0); @@ -3293,7 +3344,7 @@ static struct pci_driver vmxnet3_driver = { .name = vmxnet3_driver_name, .id_table = vmxnet3_pciid_table, .probe = vmxnet3_probe_device, - .remove = __devexit_p(vmxnet3_remove_device), + .remove = vmxnet3_remove_device, #ifdef CONFIG_PM .driver.pm = &vmxnet3_pm_ops, #endif @@ -3303,7 +3354,7 @@ static struct pci_driver vmxnet3_driver = { static int __init vmxnet3_init_module(void) { - printk(KERN_INFO "%s - version %s\n", 
VMXNET3_DRIVER_DESC, + pr_info("%s - version %s\n", VMXNET3_DRIVER_VERSION_REPORT); return pci_register_driver(&vmxnet3_driver); }
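
Notes on the main conversions in this patch. First, logging: raw printk(KERN_* ...) calls with a hand-rolled "%s: " prefix become the netdev_err()/netdev_info()/netdev_dbg() helpers, which print the driver and interface name themselves. From the link-state hunk:

	/* before */
	printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
	       adapter->netdev->name, adapter->link_speed);
	if (!netif_carrier_ok(adapter->netdev))
		netif_carrier_on(adapter->netdev);

	/* after */
	netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
		    adapter->link_speed);
	netif_carrier_on(adapter->netdev);

Dropping the netif_carrier_ok() guard is safe: netif_carrier_on() is a test-and-clear on the carrier bit, so a redundant call is a no-op.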
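The largest change is mechanical: every pci_alloc_consistent()/pci_free_consistent() pair becomes dma_alloc_coherent()/dma_free_coherent() against &pdev->dev, with an explicit GFP_KERNEL (the PCI wrapper implied GFP_ATOMIC and hid the struct device). A minimal sketch of the pattern; example_alloc_ring()/example_free_ring() are hypothetical helpers for illustration, the driver open-codes each call site:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Legacy form, as removed by the patch:
	 *	base = pci_alloc_consistent(pdev, sz, &base_pa);
	 */
	static void *example_alloc_ring(struct pci_dev *pdev, size_t sz,
					dma_addr_t *base_pa)
	{
		/* generic DMA API: explicit device and allocation context */
		return dma_alloc_coherent(&pdev->dev, sz, base_pa, GFP_KERNEL);
	}

	static void example_free_ring(struct pci_dev *pdev, size_t sz,
				      void *base, dma_addr_t base_pa)
	{
		dma_free_coherent(&pdev->dev, sz, base, base_pa);
	}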
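Relatedly, the per-queue buf_info arrays move from kcalloc()/kzalloc() into coherent DMA memory, because the queue descriptors the device reads carry their address: virt_to_phys() only matches the device-visible address when no IOMMU is translating. Condensed from the hunks above:

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
					   &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	/* later, in vmxnet3_setup_driver_shared(): */
	tqc->ddPA = cpu_to_le64(tq->buf_info_pa);  /* was virt_to_phys(tq->buf_info) */

dma_zalloc_coherent() is the zeroing wrapper available in kernels of this era. The adapter struct itself and the PM and RSS configuration blocks get the same treatment via adapter_pa, pm_conf_pa and rss_conf_pa.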
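Streaming mappings follow suit: pci_map_single()/pci_unmap_single() and pci_map_page()/pci_unmap_page() become their dma_* equivalents. The patch keeps the PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE direction constants, which are plain aliases of DMA_TO_DEVICE/DMA_FROM_DEVICE. A sketch of the rx-buffer path; note the dma_mapping_error() check is shown here as the usual companion, the patch itself does not add one:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static int example_map_rx_buf(struct pci_dev *pdev, struct sk_buff *skb,
				      unsigned int len, dma_addr_t *dma)
	{
		*dma = dma_map_single(&pdev->dev, skb->data, len,
				      DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, *dma))
			return -EIO;
		return 0;
	}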
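A functional fix hides among the conversions: the tx-mapping loop previously assumed one descriptor per skb fragment, but a TxDesc length field encodes at most VMXNET3_MAX_TX_BUF_SIZE (16 KB; per the hunk's comment, the spec treats a length of 0 as 2^14). Oversized frags are now chopped into 16 KB pieces, each with its own skb_frag_dma_map() at the right offset, and the new txd_estimate() makes the descriptor reservation match:

	static int txd_estimate(const struct sk_buff *skb)
	{
		/* one descriptor per VMXNET3_MAX_TX_BUF_SIZE chunk of the
		 * linear area, plus one spare ...
		 */
		int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const struct skb_frag_struct *frag =
						&skb_shinfo(skb)->frags[i];

			/* ... and per chunk of every frag, not one per frag */
			count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
		}
		return count;
	}

For example, a single 32 KB fragment now reserves two descriptors where the old "nr_frags + 1" estimate reserved one.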
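Header parsing drops the open-coded casts in favor of the standard accessors (ip_hdr(), ipv6_hdr(), tcp_hdr()), and fixes the UDP case, which used to copy sizeof(struct tcphdr) on the theory that copying more than needed was harmless. Condensed from the hunk above:

	#include <net/tcp.h>

	ctx->eth_ip_hdr_size = skb_transport_offset(skb);
	ctx->l4_hdr_size = tcp_hdrlen(skb);	/* == tcp_hdr(skb)->doff * 4 */
	...
	else if (iph->protocol == IPPROTO_UDP)
		ctx->l4_hdr_size = sizeof(struct udphdr);  /* was tcphdr */

The copy size is also clamped with min(..., skb->len), so runt frames can no longer ask the data ring to copy past the end of the packet.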
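Interrupt setup replaces the open-coded retry loop around pci_enable_msix(), whose positive return value meant "try again with fewer", with pci_enable_msix_range(). Passing minvec == maxvec makes each attempt all-or-nothing, and the driver retries once at its floor. A sketch of the fallback logic, with example_acquire_vectors() standing in for the reworked vmxnet3_acquire_msix_vectors():

	#include <linux/pci.h>

	static int example_acquire_vectors(struct pci_dev *pdev,
					   struct msix_entry *entries,
					   int nvec, int min_vec)
	{
		/* minvec == maxvec: grant exactly nvec or fail */
		int ret = pci_enable_msix_range(pdev, entries, nvec, nvec);

		/* -ENOSPC: fewer than nvec vectors exist; retry at the
		 * driver's minimum rather than an arbitrary count.
		 */
		if (ret == -ENOSPC && nvec > min_vec)
			ret = pci_enable_msix_range(pdev, entries,
						    min_vec, min_vec);

		return ret;	/* >= 0: vectors enabled; < 0: errno */
	}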
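Two RSS changes: the Toeplitz hash key becomes a fixed table instead of get_random_bytes(), giving identical flow-to-queue placement across boots and hosts (useful for reproducing performance issues, at the cost of a guessable key), and the hash the device already computed is finally handed to the stack when NETIF_F_RXHASH is set:

	rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
	memcpy(rssConf->hashKey, rss_key, sizeof(rss_key)); /* was random */

	/* rx completion path */
	#ifdef VMXNET3_RSS
		if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
		    (adapter->netdev->features & NETIF_F_RXHASH))
			skb_set_hash(ctx->skb, le32_to_cpu(rcd->rssHash),
				     PKT_HASH_TYPE_L3);
	#endif

PKT_HASH_TYPE_L3 is the conservative label: the descriptor's rssType would justify claiming an L4 hash for the TCP types, but L3 never over-promises. The RSS config descriptor's confLen/confPA fields also gain the cpu_to_le32/cpu_to_le64 wrappers they were missing.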
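The VLAN hunks track the kernel-wide 802.1ad rework: the acceleration feature flags gain a CTAG infix (NETIF_F_HW_VLAN_CTAG_RX and friends), the ndo callbacks grow a protocol argument, and __vlan_hwaccel_put_tag() must be told which TPID the hardware stripped:

	static int
	vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)

	/* rx completion path */
	if (unlikely(rcd->ts))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);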
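vmxnet3_set_mc() used to hand the device virt_to_phys(new_table); it now maps the multicast list for the duration of the UPDATE_MAC_FILTERS command and unmaps it afterwards, again for IOMMU correctness. Condensed:

	new_table_pa = dma_map_single(&adapter->pdev->dev, new_table,
				      rxConf->mfTableLen, PCI_DMA_TODEVICE);
	rxConf->mfTablePA = cpu_to_le64(new_table_pa);

	/* ... issue VMXNET3_CMD_UPDATE_MAC_FILTERS ... */

	if (new_table) {
		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
		kfree(new_table);
	}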
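Smaller correctness fixes ride along: netif_carrier_off() is now called before register_netdev() and the QUIESCED state bit is set before registration, so the device cannot briefly report link-up before it is activated; vmxnet3_netpoll() dispatches per interrupt type so MSI-X multiqueue setups are serviced; the defunct __devinit/__devexit annotations are dropped; and the tx drop path frees with dev_kfree_skb_any(), since netpoll can drive vmxnet3_tq_xmit() from hard-irq context:

	drop_pkt:
		tq->stats.drop_total++;
		dev_kfree_skb_any(skb);	/* safe in any context, unlike dev_kfree_skb() */
		return NETDEV_TX_OK;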
