Diffstat (limited to 'drivers/net/ethernet/nvidia/forcedeth.c')
-rw-r--r--	drivers/net/ethernet/nvidia/forcedeth.c	251
1 file changed, 164 insertions(+), 87 deletions(-)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 4c4e7f45838..9afc536c573 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -26,8 +26,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -59,7 +58,6 @@
 #include <linux/skbuff.h>
 #include <linux/mii.h>
 #include <linux/random.h>
-#include <linux/init.h>
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
@@ -69,7 +67,6 @@
 #include <linux/io.h>
 
 #include <asm/irq.h>
-#include <asm/system.h>
 
 #define TX_WORK_PER_LOOP  64
 #define RX_WORK_PER_LOOP  64
@@ -409,7 +406,7 @@ union ring_type {
 #define NV_RX_DESCRIPTORVALID	(1<<16)
 #define NV_RX_MISSEDFRAME	(1<<17)
-#define NV_RX_SUBSTRACT1	(1<<18)
+#define NV_RX_SUBTRACT1		(1<<18)
 #define NV_RX_ERROR1		(1<<23)
 #define NV_RX_ERROR2		(1<<24)
 #define NV_RX_ERROR3		(1<<25)
@@ -426,7 +423,7 @@ union ring_type {
 #define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
 #define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
 #define NV_RX2_DESCRIPTORVALID	(1<<29)
-#define NV_RX2_SUBSTRACT1	(1<<25)
+#define NV_RX2_SUBTRACT1	(1<<25)
 #define NV_RX2_ERROR1		(1<<18)
 #define NV_RX2_ERROR2		(1<<19)
 #define NV_RX2_ERROR3		(1<<20)
@@ -1756,19 +1753,19 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 
 	/* software stats */
 	do {
-		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
 		storage->rx_packets       = np->stat_rx_packets;
 		storage->rx_bytes         = np->stat_rx_bytes;
 		storage->rx_dropped       = np->stat_rx_dropped;
 		storage->rx_missed_errors = np->stat_rx_missed_errors;
-	} while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
 
 	do {
-		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
 		storage->tx_packets = np->stat_tx_packets;
 		storage->tx_bytes   = np->stat_tx_bytes;
 		storage->tx_dropped = np->stat_tx_dropped;
-	} while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
+	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
 
 	/* If the nic supports hw counters then retrieve latest values */
 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -1815,13 +1812,18 @@ static int nv_alloc_rx(struct net_device *dev)
 		less_rx = np->last_rx.orig;
 
 	while (np->put_rx.orig != less_rx) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
 		if (skb) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
 							     skb->data,
 							     skb_tailroom(skb),
 							     PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						  np->put_rx_ctx->dma)) {
+				kfree_skb(skb);
+				goto packet_dropped;
+			}
 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
 			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
 			wmb();
@@ -1831,6 +1833,7 @@ static int nv_alloc_rx(struct net_device *dev)
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
+packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
 			np->stat_rx_dropped++;
 			u64_stats_update_end(&np->swstats_rx_syncp);
@@ -1850,13 +1853,18 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 		less_rx = np->last_rx.ex;
 
 	while (np->put_rx.ex != less_rx) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
 		if (skb) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
 							     skb->data,
 							     skb_tailroom(skb),
 							     PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						  np->put_rx_ctx->dma)) {
+				kfree_skb(skb);
+				goto packet_dropped;
+			}
 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
 			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
 			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
@@ -1867,6 +1875,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
+packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
 			np->stat_rx_dropped++;
 			u64_stats_update_end(&np->swstats_rx_syncp);
@@ -2189,6 +2198,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ring_desc *start_tx;
 	struct ring_desc *prev_tx;
 	struct nv_skb_map *prev_tx_ctx;
+	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2218,6 +2228,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
 						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->put_tx_ctx->dma)) {
+			/* on DMA mapping error - drop the packet */
+			dev_kfree_skb_any(skb);
+			u64_stats_update_begin(&np->swstats_tx_syncp);
+			np->stat_tx_dropped++;
+			u64_stats_update_end(&np->swstats_tx_syncp);
+			return NETDEV_TX_OK;
+		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2241,12 +2260,31 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
+			if (!start_tx_ctx)
+				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
+
 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
 							bcnt,
 							DMA_TO_DEVICE);
+			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+				/* Unwind the mapped fragments */
+				do {
+					nv_unmap_txskb(np, start_tx_ctx);
+					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+						tmp_tx_ctx = np->first_tx_ctx;
+				} while (tmp_tx_ctx != np->put_tx_ctx);
+				dev_kfree_skb_any(skb);
+				np->put_tx_ctx = start_tx_ctx;
+				u64_stats_update_begin(&np->swstats_tx_syncp);
+				np->stat_tx_dropped++;
+				u64_stats_update_end(&np->swstats_tx_syncp);
+				return NETDEV_TX_OK;
+			}
+
 			np->put_tx_ctx->dma_len = bcnt;
 			np->put_tx_ctx->dma_single = 0;
 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2280,6 +2318,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	netdev_sent_queue(np->dev, skb->len);
 
+	skb_tx_timestamp(skb);
+
 	np->put_tx.orig = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2305,7 +2345,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	struct ring_desc_ex *start_tx;
 	struct ring_desc_ex *prev_tx;
 	struct nv_skb_map *prev_tx_ctx;
-	struct nv_skb_map *start_tx_ctx;
+	struct nv_skb_map *start_tx_ctx = NULL;
+	struct nv_skb_map *tmp_tx_ctx = NULL;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2336,6 +2377,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
 						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->put_tx_ctx->dma)) {
+			/* on DMA mapping error - drop the packet */
+			dev_kfree_skb_any(skb);
+			u64_stats_update_begin(&np->swstats_tx_syncp);
+			np->stat_tx_dropped++;
+			u64_stats_update_end(&np->swstats_tx_syncp);
+			return NETDEV_TX_OK;
+		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 1;
 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -2361,11 +2411,29 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
+			if (!start_tx_ctx)
+				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
 							bcnt,
 							DMA_TO_DEVICE);
+
+			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+				/* Unwind the mapped fragments */
+				do {
+					nv_unmap_txskb(np, start_tx_ctx);
+					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+						tmp_tx_ctx = np->first_tx_ctx;
+				} while (tmp_tx_ctx != np->put_tx_ctx);
+				dev_kfree_skb_any(skb);
+				np->put_tx_ctx = start_tx_ctx;
+				u64_stats_update_begin(&np->swstats_tx_syncp);
+				np->stat_tx_dropped++;
+				u64_stats_update_end(&np->swstats_tx_syncp);
+				return NETDEV_TX_OK;
+			}
 			np->put_tx_ctx->dma_len = bcnt;
 			np->put_tx_ctx->dma_single = 0;
 			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -2427,6 +2495,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 
 	netdev_sent_queue(np->dev, skb->len);
 
+	skb_tx_timestamp(skb);
+
 	np->put_tx.ex = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2762,7 +2832,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				}
 				/* framing errors are soft errors */
 				else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
-					if (flags & NV_RX_SUBSTRACT1)
+					if (flags & NV_RX_SUBTRACT1)
 						len--;
 				}
 				/* the rest are hard errors */
@@ -2793,7 +2863,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				}
 				/* framing errors are soft errors */
 				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
-					if (flags & NV_RX2_SUBSTRACT1)
+					if (flags & NV_RX2_SUBTRACT1)
 						len--;
 				}
 				/* the rest are hard errors */
@@ -2867,7 +2937,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 				}
 				/* framing errors are soft errors */
 				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
-					if (flags & NV_RX2_SUBSTRACT1)
+					if (flags & NV_RX2_SUBTRACT1)
 						len--;
 				}
 				/* the rest are hard errors */
@@ -2889,15 +2959,15 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
 
 			/*
-			 * There's need to check for NETIF_F_HW_VLAN_RX here.
-			 * Even if vlan rx accel is disabled,
+			 * There's need to check for NETIF_F_HW_VLAN_CTAG_RX
+			 * here. Even if vlan rx accel is disabled,
 			 * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
 			 */
-			if (dev->features & NETIF_F_HW_VLAN_RX &&
+			if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
 			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
 				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
 
-				__vlan_hwaccel_put_tag(skb, vid);
+				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 			}
 			napi_gro_receive(&np->napi, skb);
 			u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -3214,7 +3284,7 @@ static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
 }
 
 /**
- * nv_update_linkspeed: Setup the MAC according to the link partner
+ * nv_update_linkspeed - Setup the MAC according to the link partner
  * @dev: Network device to be configured
  *
  * The function queries the PHY and checks if there is a link partner.
@@ -3405,7 +3475,7 @@ set_speed:
 
 	pause_flags = 0;
 	/* setup pause frame */
-	if (np->duplex != 0) {
+	if (netif_running(dev) && (np->duplex != 0)) {
 		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
 			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
 			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@@ -3548,8 +3618,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 	return IRQ_HANDLED;
 }
 
-/**
- * All _optimized functions are used to help increase performance
+/* All _optimized functions are used to help increase performance
  * (reduce CPU and increase throughput). They use descripter version 3,
  * compiler directives, and reduce memory accesses.
  */
@@ -3772,7 +3841,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & NVREG_IRQ_RECOVER_ERROR) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -3782,7 +3851,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 				np->recover_error = 1;
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 		if (unlikely(i > max_interrupt_work)) {
@@ -3861,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 {
 	struct fe_priv *np = get_nvpriv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int ret = 1;
+	int ret;
 	int i;
 	irqreturn_t (*handler)(int foo, void *data);
 
@@ -3877,14 +3946,18 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
 			np->msi_x_entry[i].entry = i;
-		ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
-		if (ret == 0) {
+		ret = pci_enable_msix_range(np->pci_dev,
+					    np->msi_x_entry,
+					    np->msi_flags & NV_MSI_X_VECTORS_MASK,
+					    np->msi_flags & NV_MSI_X_VECTORS_MASK);
+		if (ret > 0) {
 			np->msi_flags |= NV_MSI_X_ENABLED;
 			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
 				/* Request irq for rx handling */
 				sprintf(np->name_rx, "%s-rx", dev->name);
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
-						nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
+						  nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
+				if (ret) {
 					netdev_info(dev,
 						    "request_irq failed for rx %d\n",
 						    ret);
@@ -3894,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 				}
 				/* Request irq for tx handling */
 				sprintf(np->name_tx, "%s-tx", dev->name);
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
-						nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
+						  nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
+				if (ret) {
 					netdev_info(dev,
 						    "request_irq failed for tx %d\n",
 						    ret);
@@ -3905,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 				}
 				/* Request irq for link and timer handling */
 				sprintf(np->name_other, "%s-other", dev->name);
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
-						nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
+						  nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
+				if (ret) {
 					netdev_info(dev,
 						    "request_irq failed for link %d\n",
 						    ret);
@@ -3922,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
 			} else {
 				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
+				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
+						  handler, IRQF_SHARED, dev->name, dev);
+				if (ret) {
 					netdev_info(dev,
 						    "request_irq failed %d\n",
 						    ret);
@@ -3936,19 +4013,19 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 				writel(0, base + NvRegMSIXMap1);
 			}
 			netdev_info(dev, "MSI-X enabled\n");
+			return 0;
 		}
 	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+	if (np->msi_flags & NV_MSI_CAPABLE) {
 		ret = pci_enable_msi(np->pci_dev);
 		if (ret == 0) {
 			np->msi_flags |= NV_MSI_ENABLED;
-			dev->irq = np->pci_dev->irq;
-			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
+			ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
+			if (ret) {
 				netdev_info(dev, "request_irq failed %d\n",
 					    ret);
 				pci_disable_msi(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_ENABLED;
-				dev->irq = np->pci_dev->irq;
 				goto out_err;
 			}
@@ -3958,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 			/* enable msi vector 0 */
 			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
 			netdev_info(dev, "MSI enabled\n");
+			return 0;
 		}
 	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
-			goto out_err;
-	}
+	if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
+		goto out_err;
 
 	return 0;
 out_free_tx:
@@ -4209,8 +4285,8 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 			if (np->duplex)
 				ecmd->duplex = DUPLEX_FULL;
 		} else {
-			speed = -1;
-			ecmd->duplex = -1;
+			speed = SPEED_UNKNOWN;
+			ecmd->duplex = DUPLEX_UNKNOWN;
 		}
 	ethtool_cmd_speed_set(ecmd, speed);
 	ecmd->autoneg = np->autoneg;
@@ -4434,7 +4510,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
 	regs->version = FORCEDETH_REGS_VER;
 	spin_lock_irq(&np->lock);
-	for (i = 0; i <= np->register_size/sizeof(u32); i++)
+	for (i = 0; i < np->register_size/sizeof(u32); i++)
 		rbuf[i] = readl(base + i*sizeof(u32));
 	spin_unlock_irq(&np->lock);
 }
@@ -4747,7 +4823,7 @@ static netdev_features_t nv_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
 	/* vlan is dependent on rx checksum offload */
-	if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+	if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
 		features |= NETIF_F_RXCSUM;
 
 	return features;
@@ -4759,12 +4835,12 @@ static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
 
 	spin_lock_irq(&np->lock);
 
-	if (features & NETIF_F_HW_VLAN_RX)
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
 	else
 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
 
-	if (features & NETIF_F_HW_VLAN_TX)
+	if (features & NETIF_F_HW_VLAN_CTAG_TX)
 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
 	else
 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
@@ -4801,7 +4877,7 @@ static int nv_set_features(struct net_device *dev, netdev_features_t features)
 		spin_unlock_irq(&np->lock);
 	}
 
-	if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
+	if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
 		nv_vlan_mode(dev, features);
 
 	return 0;
@@ -4993,15 +5069,19 @@ static int nv_loopback_test(struct net_device *dev)
 
 	/* setup packet for tx */
 	pkt_len = ETH_DATA_LEN;
-	tx_skb = dev_alloc_skb(pkt_len);
+	tx_skb = netdev_alloc_skb(dev, pkt_len);
 	if (!tx_skb) {
-		netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
 		ret = 0;
 		goto out;
 	}
 	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
 				       skb_tailroom(tx_skb),
 				       PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(np->pci_dev,
+				  test_dma_addr)) {
+		dev_kfree_skb_any(tx_skb);
+		goto out;
+	}
 	pkt_data = skb_put(tx_skb, pkt_len);
 	for (i = 0; i < pkt_len; i++)
 		pkt_data[i] = (u8)(i & 0xff);
@@ -5077,8 +5157,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int result;
-	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
+	int result, count;
+
+	count = nv_get_sset_count(dev, ETH_SS_TEST);
+	memset(buffer, 0, count * sizeof(u64));
 
 	if (!nv_link_test(dev)) {
 		test->flags |= ETH_TEST_FL_FAILED;
@@ -5122,7 +5204,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 			return;
 		}
 
-		if (!nv_loopback_test(dev)) {
+		if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
 			test->flags |= ETH_TEST_FL_FAILED;
 			buffer[3] = 1;
 		}
@@ -5181,6 +5263,7 @@ static const struct ethtool_ops ops = {
 	.get_ethtool_stats = nv_get_ethtool_stats,
 	.get_sset_count = nv_get_sset_count,
 	.self_test = nv_self_test,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 /* The mgmt unit and driver use a semaphore to access the phy during init */
@@ -5453,6 +5536,7 @@ static int nv_close(struct net_device *dev)
 	netif_stop_queue(dev);
 	spin_lock_irq(&np->lock);
+	nv_update_pause(dev, 0);	/* otherwise stop_tx bricks NIC */
 	nv_stop_rxtx(dev);
 	nv_txrx_reset(dev);
@@ -5517,7 +5601,7 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
 #endif
 };
 
-static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 {
 	struct net_device *dev;
 	struct fe_priv *np;
@@ -5544,6 +5628,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	spin_lock_init(&np->lock);
 	spin_lock_init(&np->hwstats_lock);
 	SET_NETDEV_DEV(dev, &pci_dev->dev);
+	u64_stats_init(&np->swstats_rx_syncp);
+	u64_stats_init(&np->swstats_tx_syncp);
 
 	init_timer(&np->oom_kick);
 	np->oom_kick.data = (unsigned long) dev;
@@ -5630,7 +5716,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	np->vlanctl_bits = 0;
 	if (id->driver_data & DEV_HAS_VLAN) {
 		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
-		dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_CTAG_TX;
 	}
 
 	dev->features |= dev->hw_features;
@@ -5649,9 +5736,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	np->base = ioremap(addr, np->register_size);
 	if (!np->base)
 		goto out_relreg;
-	dev->base_addr = (unsigned long)np->base;
-
-	dev->irq = pci_dev->irq;
 
 	np->rx_ring_size = RX_RING_DEFAULT;
 	np->tx_ring_size = TX_RING_DEFAULT;
@@ -5682,7 +5766,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		dev->netdev_ops = &nv_netdev_ops_optimized;
 
 	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-	SET_ETHTOOL_OPS(dev, &ops);
+	dev->ethtool_ops = &ops;
 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
 
 	pci_set_drvdata(pci_dev, dev);
@@ -5731,9 +5815,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 			 "%s: set workaround bit for reversed mac addr\n",
 			 __func__);
 	}
-	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-	if (!is_valid_ether_addr(dev->perm_addr)) {
+	if (!is_valid_ether_addr(dev->dev_addr)) {
 		/*
 		 * Bad mac address. At least one bios sets the mac address
 		 * to 01:23:45:67:89:ab
@@ -5741,7 +5824,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		dev_err(&pci_dev->dev,
 			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
 			dev->dev_addr);
-		random_ether_addr(dev->dev_addr);
+		eth_hw_addr_random(dev);
 		dev_err(&pci_dev->dev,
 			"Using random MAC address: %pM\n", dev->dev_addr);
 	}
@@ -5905,11 +5988,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		goto out_error;
 	}
 
+	netif_carrier_off(dev);
+
+	/* Some NICs freeze when TX pause is enabled while NIC is
+	 * down, and this stays across warm reboots. The sequence
+	 * below should be enough to recover from that state.
+	 */
+	nv_update_pause(dev, 0);
+	nv_start_tx(dev);
+	nv_stop_tx(dev);
+
 	if (id->driver_data & DEV_HAS_VLAN)
 		nv_vlan_mode(dev, dev->features);
 
-	netif_carrier_off(dev);
-
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
@@ -5917,7 +6008,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
 		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
 			"csum " : "",
-		 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
+		 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
+				  NETIF_F_HW_VLAN_CTAG_TX) ?
 			"vlan " : "",
 		 dev->features & (NETIF_F_LOOPBACK) ?
 			"loopback " : "",
@@ -5935,7 +6027,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 out_error:
 	if (phystate_orig)
 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
-	pci_set_drvdata(pci_dev, NULL);
 out_freering:
 	free_rings(dev);
 out_unmap:
@@ -5987,7 +6078,7 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
 		       base + NvRegTransmitPoll);
 }
 
-static void __devexit nv_remove(struct pci_dev *pci_dev)
+static void nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
 
@@ -6006,7 +6097,6 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
 	free_netdev(dev);
-	pci_set_drvdata(pci_dev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -6259,25 +6349,15 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
 	{0,},
 };
 
-static struct pci_driver driver = {
+static struct pci_driver forcedeth_pci_driver = {
 	.name		= DRV_NAME,
 	.id_table	= pci_tbl,
 	.probe		= nv_probe,
-	.remove		= __devexit_p(nv_remove),
+	.remove		= nv_remove,
 	.shutdown	= nv_shutdown,
 	.driver.pm	= NV_PM_OPS,
 };
 
-static int __init init_nic(void)
-{
-	return pci_register_driver(&driver);
-}
-
-static void __exit exit_nic(void)
-{
-	pci_unregister_driver(&driver);
-}
-
 module_param(max_interrupt_work, int, 0);
 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
 module_param(optimization_mode, int, 0);
@@ -6298,11 +6378,8 @@ module_param(debug_tx_timeout, bool, 0);
 MODULE_PARM_DESC(debug_tx_timeout,
 		 "Dump tx related registers and ring when tx_timeout happens");
 
+module_pci_driver(forcedeth_pci_driver);
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
 MODULE_LICENSE("GPL");
-
 MODULE_DEVICE_TABLE(pci, pci_tbl);
-
-module_init(init_nic);
-module_exit(exit_nic);
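Most of the TX/RX hunks above add the same defensive pattern: check the cookie returned by the DMA mapping routine before handing it to hardware, and drop the packet on failure. A minimal sketch of the pattern in isolation, assuming the same PCI DMA API era as this diff (all example_* names are hypothetical, not forcedeth symbols):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map a freshly allocated RX skb; on mapping failure free it and
 * report an error so the caller can count the packet as dropped
 * instead of giving the NIC an invalid dma_addr_t. */
static int example_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			      dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, skb->data, skb_tailroom(skb),
			      PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *dma)) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	return 0;
}

The multi-fragment TX case additionally has to unwind on failure: every fragment already mapped for the current skb is unmapped again (the nv_unmap_txskb() loop in the hunks above) and the context pointer is rewound to start_tx_ctx, otherwise the ring would leak DMA mappings.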
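The _bh to _irq conversion in nv_get_stats64() and the u64_stats_init() calls added to nv_probe() both belong to the u64_stats_sync scheme, which lets a reader on 32-bit hosts retry until it observes a consistent 64-bit counter snapshot. A sketch of both sides of the protocol, using the function names as they existed in this kernel era (example_* names hypothetical):

#include <linux/u64_stats_sync.h>

struct example_stats {
	struct u64_stats_sync syncp;	/* set up with u64_stats_init() */
	u64 packets;
};

/* writer (hot path): bracket the update */
static void example_stats_inc(struct example_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	u64_stats_update_end(&s->syncp);
}

/* reader: retry until no writer interleaved with the snapshot */
static u64 example_stats_read(struct example_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		packets = s->packets;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));

	return packets;
}

The _irq variants appear to be needed because the writers here can run in hard-IRQ context, which the _bh variants do not guard against; the u64_stats_init() calls in nv_probe() initialize the seqcount before any reader or writer can touch it.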
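The MSI-X hunks track an API change: pci_enable_msix() (removed above) returned 0 on success and a positive suggested vector count on failure, while pci_enable_msix_range() returns the number of vectors actually allocated, or a negative errno. Passing the same value as minvec and maxvec, as this patch does, makes the request all-or-nothing. A sketch under those assumptions (example_* names hypothetical):

#include <linux/pci.h>

/* Request exactly nvec MSI-X vectors; a negative return means the
 * caller should fall back to MSI or legacy INTx. */
static int example_enable_msix_exact(struct pci_dev *pdev,
				     struct msix_entry *entries, int nvec)
{
	int i, ret;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	ret = pci_enable_msix_range(pdev, entries, nvec, nvec);
	if (ret < 0)
		return ret;

	return 0;	/* ret == nvec here */
}

This is also why the success test in nv_request_irq() changes from ret == 0 to ret > 0, and why explicit return 0 statements are added once an interrupt scheme is fully set up: the MSI-X, MSI, and INTx paths no longer fall through on the value of ret.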
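Finally, the init_nic()/exit_nic() pair deleted at the bottom was pure registration boilerplate; module_pci_driver() generates equivalent module_init()/module_exit() glue around pci_register_driver()/pci_unregister_driver(). A sketch of the whole idiom (example_* names and PCI IDs hypothetical):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(0x10de, 0x01c3) },	/* hypothetical IDs */
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pci_tbl,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* expands to module_init/module_exit wrappers calling
 * pci_register_driver()/pci_unregister_driver() */
module_pci_driver(example_pci_driver);

MODULE_LICENSE("GPL");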
