Diffstat (limited to 'drivers/net/ethernet/realtek/8139cp.c')
-rw-r--r--   drivers/net/ethernet/realtek/8139cp.c   205
1 file changed, 136 insertions(+), 69 deletions(-)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index abc79076f86..2bc728e65e2 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -431,7 +431,7 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
 	cp->dev->stats.rx_bytes += skb->len;
 
 	if (opts2 & RxVlanTagged)
-		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
 
 	napi_gro_receive(&cp->napi, skb);
 }
@@ -476,9 +476,9 @@ rx_status_loop:
 	rx = 0;
 	cpw16(IntrStatus, cp_rx_intr_mask);
 
-	while (1) {
+	while (rx < budget) {
 		u32 status, len;
-		dma_addr_t mapping;
+		dma_addr_t mapping, new_mapping;
 		struct sk_buff *skb, *new_skb;
 		struct cp_desc *desc;
 		const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,14 @@ rx_status_loop:
 			goto rx_next;
 		}
 
+		new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+					     PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
+			dev->stats.rx_dropped++;
+			kfree_skb(new_skb);
+			goto rx_next;
+		}
+
 		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
@@ -531,12 +539,11 @@ rx_status_loop:
 
 		skb_put(skb, len);
 
-		mapping = dma_map_single(&cp->pdev->dev, new_skb->data,
-					 buflen, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[rx_tail] = new_skb;
 
 		cp_rx_skb(cp, skb, desc);
 		rx++;
+		mapping = new_mapping;
 
 rx_next:
 		cp->rx_ring[rx_tail].opts2 = 0;
@@ -547,9 +554,6 @@ rx_next:
 		else
 			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
 		rx_tail = NEXT_RX(rx_tail);
-
-		if (rx >= budget)
-			break;
 	}
 
 	cp->rx_tail = rx_tail;
@@ -563,7 +567,7 @@ rx_next:
 	if (cpr16(IntrStatus) & cp_rx_intr_mask)
 		goto rx_status_loop;
 
-	napi_gro_flush(napi);
+	napi_gro_flush(napi, false);
 	spin_lock_irqsave(&cp->lock, flags);
 	__napi_complete(napi);
 	cpw16_f(IntrMask, cp_intr_mask);
@@ -577,28 +581,30 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct cp_private *cp;
+	int handled = 0;
 	u16 status;
 
 	if (unlikely(dev == NULL))
 		return IRQ_NONE;
 	cp = netdev_priv(dev);
 
+	spin_lock(&cp->lock);
+
 	status = cpr16(IntrStatus);
 	if (!status || (status == 0xFFFF))
-		return IRQ_NONE;
+		goto out_unlock;
+
+	handled = 1;
 
 	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
 		  status, cpr8(Cmd), cpr16(CpCmd));
 
 	cpw16(IntrStatus, status & ~cp_rx_intr_mask);
 
-	spin_lock(&cp->lock);
-
 	/* close possible race's with dev_close */
 	if (unlikely(!netif_running(dev))) {
 		cpw16(IntrMask, 0);
-		spin_unlock(&cp->lock);
-		return IRQ_HANDLED;
+		goto out_unlock;
 	}
 
 	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
@@ -612,7 +618,6 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 	if (status & LinkChg)
 		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
 
-	spin_unlock(&cp->lock);
 
 	if (status & PciErr) {
 		u16 pci_status;
@@ -625,7 +630,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 		/* TODO: reset hardware */
 	}
 
-	return IRQ_HANDLED;
+out_unlock:
+	spin_unlock(&cp->lock);
+
+	return IRQ_RETVAL(handled);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -635,9 +643,12 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
  */
 static void cp_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	cp_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct cp_private *cp = netdev_priv(dev);
+	const int irq = cp->pdev->irq;
+
+	disable_irq(irq);
+	cp_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
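The receive-path hunks above follow a map-before-commit refill pattern: the replacement buffer is DMA-mapped and checked with dma_mapping_error() first, and only then is the old buffer unmapped and handed up the stack; on a mapping failure the packet is dropped and the old, still-mapped buffer stays on the ring. A minimal sketch of that pattern follows; rx_swap_buffer() and its parameters are hypothetical illustrations, not the driver's actual structures.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int rx_swap_buffer(struct device *d, struct net_device *dev,
			  struct sk_buff **slot, dma_addr_t *mapping,
			  unsigned int buflen)
{
	struct sk_buff *new_skb = netdev_alloc_skb_ip_align(dev, buflen);
	dma_addr_t new_mapping;

	if (!new_skb)
		return -ENOMEM;		/* old buffer stays on the ring */

	/* Map the replacement first; only then is the swap committed. */
	new_mapping = dma_map_single(d, new_skb->data, buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(d, new_mapping)) {
		dev->stats.rx_dropped++;
		kfree_skb(new_skb);
		return -EIO;		/* old mapping is still valid */
	}

	/* Safe to release the old buffer and install the new one. */
	dma_unmap_single(d, *mapping, buflen, DMA_FROM_DEVICE);
	*slot = new_skb;
	*mapping = new_mapping;
	return 0;
}

The same hunks also bound the loop with while (rx < budget) up front instead of breaking at the bottom, so a poll never processes more packets than NAPI allotted.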
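The cp_interrupt() rework holds the device lock for the whole handler, funnels every exit through a single out_unlock label, and returns IRQ_RETVAL(handled), so a shared interrupt line is answered IRQ_NONE when this device raised nothing. A sketch of that shape, assuming a hypothetical my_priv context and my_read_status() register read:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_priv {
	spinlock_t lock;
};

static u16 my_read_status(struct my_priv *priv);	/* hypothetical MMIO read */

static irqreturn_t my_interrupt(int irq, void *dev_instance)
{
	struct my_priv *priv = dev_instance;
	int handled = 0;
	u16 status;

	spin_lock(&priv->lock);

	status = my_read_status(priv);
	if (!status || status == 0xFFFF)	/* not ours, or device gone */
		goto out_unlock;

	handled = 1;
	/* ... ack and service the reported events ... */

out_unlock:
	spin_unlock(&priv->lock);
	/* IRQ_RETVAL(x) is IRQ_HANDLED when x is nonzero, else IRQ_NONE. */
	return IRQ_RETVAL(handled);
}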
@@ -645,6 +656,7 @@ static void cp_tx (struct cp_private *cp)
 {
 	unsigned tx_head = cp->tx_head;
 	unsigned tx_tail = cp->tx_tail;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	while (tx_tail != tx_head) {
 		struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -684,6 +696,8 @@ static void cp_tx (struct cp_private *cp)
 			netif_dbg(cp, tx_done, cp->dev,
 				  "tx done, slot %d\n", tx_tail);
 		}
+		bytes_compl += skb->len;
+		pkts_compl++;
 		dev_kfree_skb_irq(skb);
 	}
@@ -694,6 +708,7 @@ static void cp_tx (struct cp_private *cp)
 
 	cp->tx_tail = tx_tail;
 
+	netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
 	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
 		netif_wake_queue(cp->dev);
 }
@@ -704,6 +719,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 }
 
+static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
+				   int first, int entry_last)
+{
+	int frag, index;
+	struct cp_desc *txd;
+	skb_frag_t *this_frag;
+	for (frag = 0; frag+first < entry_last; frag++) {
+		index = first+frag;
+		cp->tx_skb[index] = NULL;
+		txd = &cp->tx_ring[index];
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+				 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
+	}
+}
+
 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 				  struct net_device *dev)
 {
@@ -737,6 +768,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		len = skb->len;
 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping))
+			goto out_dma_error;
+
 		txd->opts2 = opts2;
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
@@ -774,6 +808,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		first_len = skb_headlen(skb);
 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, first_mapping))
+			goto out_dma_error;
+
 		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
@@ -787,6 +824,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 			mapping = dma_map_single(&cp->pdev->dev,
 						 skb_frag_address(this_frag),
 						 len, PCI_DMA_TODEVICE);
+			if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+				unwind_tx_frag_mapping(cp, skb, first_entry, entry);
+				goto out_dma_error;
+			}
+
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
 			ctrl = eor | len | DescOwn;
@@ -840,16 +882,23 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		wmb();
 	}
 	cp->tx_head = entry;
+
+	netdev_sent_queue(dev, skb->len);
 	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
 		  entry, skb->len);
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
+out_unlock:
 	spin_unlock_irqrestore(&cp->lock, intr_flags);
 
 	cpw8(TxPoll, NormalTxPoll);
 
 	return NETDEV_TX_OK;
+out_dma_error:
+	dev_kfree_skb_any(skb);
+	cp->dev->stats.tx_dropped++;
+	goto out_unlock;
 }
 
 /* Set or clear the multicast filter for this adaptor.
@@ -934,6 +983,8 @@ static void cp_stop_hw (struct cp_private *cp)
 
 	cp->rx_tail = 0;
 	cp->tx_head = cp->tx_tail = 0;
+
+	netdev_reset_queue(cp->dev);
 }
 
 static void cp_reset_hw (struct cp_private *cp)
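The cp_start_xmit()/cp_tx() changes above wire the driver into byte queue limits (BQL): bytes are reported with netdev_sent_queue() when queued, reported back with netdev_completed_queue() when the hardware finishes them, and netdev_reset_queue() rezeros the accounting whenever the rings are flushed (here in cp_stop_hw(), cp_clean_rings(), and cp_start_hw()). A minimal sketch of that three-call contract, with hypothetical my_* hooks standing in for a driver's real paths:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit path: account the bytes as they are queued to hardware. */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... post skb to the hardware ring ... */
	netdev_sent_queue(dev, skb->len);
	return NETDEV_TX_OK;
}

/* Completion path: report what the hardware has finished. */
static void my_tx_complete(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* ... walk completed descriptors, summing pkts_compl/bytes_compl ... */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

/* Whenever the ring is flushed or the device reset, the BQL counters
 * must be zeroed too, or sent/completed bookkeeping goes out of sync. */
static void my_ring_reset(struct net_device *dev)
{
	/* ... free any in-flight skbs ... */
	netdev_reset_queue(dev);
}

Every sent byte must eventually be either completed or reset away; that invariant is why the diff also touches the stop, clean, and start paths rather than only the hot TX paths.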
@@ -954,14 +1005,48 @@ static void cp_reset_hw (struct cp_private *cp)
 
 static inline void cp_start_hw (struct cp_private *cp)
 {
+	dma_addr_t ring_dma;
+
 	cpw16(CpCmd, cp->cpcmd);
+
+	/*
+	 * These (at least TxRingAddr) need to be configured after the
+	 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
+	 * (C+ Command Register) recommends that these and more be configured
+	 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
+	 * it's been observed that the TxRingAddr is actually reset to garbage
+	 * when C+ mode Tx is enabled in CpCmd.
+	 */
+	cpw32_f(HiTxRingAddr, 0);
+	cpw32_f(HiTxRingAddr + 4, 0);
+
+	ring_dma = cp->ring_dma;
+	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	/*
+	 * Strictly speaking, the datasheet says this should be enabled
+	 * *before* setting the descriptor addresses. But what, then, would
+	 * prevent it from doing DMA to random unconfigured addresses?
+	 * This variant appears to work fine.
+	 */
 	cpw8(Cmd, RxOn | TxOn);
+
+	netdev_reset_queue(cp->dev);
+}
+
+static void cp_enable_irq(struct cp_private *cp)
+{
+	cpw16_f(IntrMask, cp_intr_mask);
 }
 
 static void cp_init_hw (struct cp_private *cp)
 {
 	struct net_device *dev = cp->dev;
-	dma_addr_t ring_dma;
 
 	cp_reset_hw(cp);
@@ -984,21 +1069,8 @@ static void cp_init_hw (struct cp_private *cp)
 
 	cpw8(Config5, cpr8(Config5) & PMEStatus);
 
-	cpw32_f(HiTxRingAddr, 0);
-	cpw32_f(HiTxRingAddr + 4, 0);
-
-	ring_dma = cp->ring_dma;
-	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
 	cpw16(MultiIntr, 0);
 
-	cpw16_f(IntrMask, cp_intr_mask);
-
 	cpw8_f(Cfg9346, Cfg9346_Lock);
 }
@@ -1017,6 +1089,10 @@ static int cp_refill_rx(struct cp_private *cp)
 
 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+			kfree_skb(skb);
+			goto err_out;
+		}
 		cp->rx_skb[i] = skb;
 
 		cp->rx_ring[i].opts2 = 0;
@@ -1054,17 +1130,22 @@ static int cp_init_rings (struct cp_private *cp)
 
 static int cp_alloc_rings (struct cp_private *cp)
 {
+	struct device *d = &cp->pdev->dev;
 	void *mem;
+	int rc;
 
-	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
-				 &cp->ring_dma, GFP_KERNEL);
+	mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
 	cp->rx_ring = mem;
 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
 
-	return cp_init_rings(cp);
+	rc = cp_init_rings(cp);
+	if (rc < 0)
+		dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+
+	return rc;
 }
 
 static void cp_clean_rings (struct cp_private *cp)
@@ -1094,6 +1175,7 @@ static void cp_clean_rings (struct cp_private *cp)
 			cp->dev->stats.tx_dropped++;
 		}
 	}
+	netdev_reset_queue(cp->dev);
 
 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
@@ -1114,6 +1196,7 @@ static void cp_free_rings (struct cp_private *cp)
 static int cp_open (struct net_device *dev)
 {
 	struct cp_private *cp = netdev_priv(dev);
+	const int irq = cp->pdev->irq;
 	int rc;
 
 	netif_dbg(cp, ifup, dev, "enabling interface\n");
@@ -1126,10 +1209,12 @@ static int cp_open (struct net_device *dev)
 
 	cp_init_hw(cp);
 
-	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
+	rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc)
 		goto err_out_hw;
 
+	cp_enable_irq(cp);
+
 	netif_carrier_off(dev);
 	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
 	netif_start_queue(dev);
@@ -1161,7 +1246,7 @@ static int cp_close (struct net_device *dev)
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 
-	free_irq(dev->irq, dev);
+	free_irq(cp->pdev->irq, dev);
 
 	cp_free_rings(cp);
 
 	return 0;
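The relocated ring setup in cp_start_hw() writes the possibly-64-bit ring address into two 32-bit registers using (ring_dma >> 16) >> 16 rather than >> 32: dma_addr_t may be only 32 bits wide, in which case a single shift by the full type width would be undefined behaviour, while two 16-bit shifts are always defined and yield 0 for the high half. A sketch of the idiom, where set_ring_addr() is an illustrative helper rather than driver code:

#include <linux/io.h>
#include <linux/types.h>

static void set_ring_addr(void __iomem *lo, void __iomem *hi, dma_addr_t addr)
{
	iowrite32(addr & 0xffffffff, lo);
	/* (addr >> 16) >> 16 extracts the high 32 bits without ever
	 * shifting a 32-bit dma_addr_t by its full width, which would
	 * be undefined behaviour; on 32-bit dma_addr_t it yields 0. */
	iowrite32((addr >> 16) >> 16, hi);
}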
@@ -1183,18 +1268,16 @@ static void cp_tx_timeout(struct net_device *dev)
 	cp_clean_rings(cp);
 	rc = cp_init_rings(cp);
 	cp_start_hw(cp);
+	cp_enable_irq(cp);
 
 	netif_wake_queue(dev);
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 }
 
-#ifdef BROKEN
 static int cp_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct cp_private *cp = netdev_priv(dev);
-	int rc;
-	unsigned long flags;
 
 	/* check for invalid MTU, according to hardware limits */
 	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
@@ -1207,22 +1290,12 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
 		return 0;
 	}
 
-	spin_lock_irqsave(&cp->lock, flags);
-
-	cp_stop_hw(cp);			/* stop h/w and free rings */
-	cp_clean_rings(cp);
-
+	/* network IS up, close it, reset MTU, and come up again. */
+	cp_close(dev);
 	dev->mtu = new_mtu;
-	cp_set_rxbufsize(cp);		/* set new rx buf size */
-
-	rc = cp_init_rings(cp);		/* realloc and restart h/w */
-	cp_start_hw(cp);
-
-	spin_unlock_irqrestore(&cp->lock, flags);
-
-	return rc;
+	cp_set_rxbufsize(cp);
+	return cp_open(dev);
 }
-#endif /* BROKEN */
 
 static const char mii_2_8139_map[8] = {
 	BasicModeCtrl,
@@ -1405,7 +1478,7 @@ static int cp_set_features(struct net_device *dev, netdev_features_t features)
 	else
 		cp->cpcmd &= ~RxChkSum;
 
-	if (features & NETIF_F_HW_VLAN_RX)
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 		cp->cpcmd |= RxVlanOn;
 	else
 		cp->cpcmd &= ~RxVlanOn;
@@ -1627,7 +1700,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
 
 static void eeprom_cmd_end(void __iomem *ee_addr)
 {
-	writeb (~EE_CS, ee_addr);
+	writeb(0, ee_addr);
 	eeprom_delay ();
 }
@@ -1783,7 +1856,7 @@ static int cp_set_eeprom(struct net_device *dev,
 /* Put the board into D3cold state and wait for WakeUp signal */
 static void cp_set_d3_state (struct cp_private *cp)
 {
-	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
+	pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
 	pci_set_power_state (cp->pdev, PCI_D3hot);
 }
@@ -1798,9 +1871,7 @@ static const struct net_device_ops cp_netdev_ops = {
 	.ndo_start_xmit		= cp_start_xmit,
 	.ndo_tx_timeout		= cp_tx_timeout,
 	.ndo_set_features	= cp_set_features,
-#ifdef BROKEN
 	.ndo_change_mtu		= cp_change_mtu,
-#endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= cp_poll_controller,
@@ -1909,7 +1980,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 			(unsigned long long)pciaddr);
 		goto err_out_res;
 	}
-	dev->base_addr = (unsigned long) regs;
 	cp->regs = regs;
 
 	cp_stop_hw(cp);
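The rewritten cp_change_mtu() drops the #ifdef BROKEN body, which resized the rings in place under the spinlock, and instead reuses the already-debugged shutdown and bring-up paths: if the interface is down, only dev->mtu changes; otherwise close, resize, reopen. The general shape, assuming hypothetical my_open()/my_close()/my_set_bufsize() standing in for a driver's ndo_open/ndo_stop and buffer-size recalculation:

#include <linux/netdevice.h>

static int my_open(struct net_device *dev);		/* hypothetical ndo_open */
static int my_close(struct net_device *dev);		/* hypothetical ndo_stop */
static void my_set_bufsize(struct net_device *dev);	/* hypothetical resize */

static int my_change_mtu(struct net_device *dev, int new_mtu)
{
	/* A real driver validates new_mtu against hardware limits first. */

	/* Device down: just record the new MTU for the next open. */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	/* Device up: tear down, resize, and bring it back up. */
	my_close(dev);
	dev->mtu = new_mtu;
	my_set_bufsize(dev);
	return my_open(dev);
}

The trade-off is a brief link interruption during the close/open cycle, in exchange for not duplicating ring-teardown logic in a rarely exercised, historically broken path.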
@@ -1919,32 +1989,29 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	for (i = 0; i < 3; i++)
 		((__le16 *) (dev->dev_addr))[i] =
 		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
-	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
 	dev->netdev_ops = &cp_netdev_ops;
 	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
 	dev->ethtool_ops = &cp_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
 	/* disabled by default until verified */
 	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
-		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 		NETIF_F_HIGHDMA;
 
-	dev->irq = pdev->irq;
-
 	rc = register_netdev(dev);
 	if (rc)
 		goto err_out_iomap;
 
-	netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
-		    dev->base_addr, dev->dev_addr, dev->irq);
+	netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
+		    regs, dev->dev_addr, pdev->irq);
 
 	pci_set_drvdata(pdev, dev);
@@ -1981,7 +2048,6 @@ static void cp_remove_one (struct pci_dev *pdev)
 	pci_release_regions(pdev);
 	pci_clear_mwi(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 }
@@ -2031,6 +2097,7 @@ static int cp_resume (struct pci_dev *pdev)
 	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
 	cp_init_rings_index (cp);
 	cp_init_hw (cp);
+	cp_enable_irq(cp);
 	netif_start_queue (dev);
 
 	spin_lock_irqsave (&cp->lock, flags);
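The NETIF_F_HW_VLAN_* flags become NETIF_F_HW_VLAN_CTAG_* here because the VLAN acceleration API was made protocol-aware: __vlan_hwaccel_put_tag() now takes the tag protocol alongside the TCI, as in the cp_rx_skb() hunk at the top of this diff. A minimal sketch of the updated receive-side call; rx_set_vlan() is illustrative, and the TCI would come from the RX descriptor (as opts2 does in this driver):

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static void rx_set_vlan(struct sk_buff *skb, u16 tci)
{
	/* This hardware strips only 802.1Q (C-TAG) headers, so the
	 * protocol argument is always ETH_P_8021Q for this device. */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
}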
