Diffstat (limited to 'drivers/net/ethernet/realtek')
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c     223
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c    278
-rw-r--r--  drivers/net/ethernet/realtek/Kconfig       29
-rw-r--r--  drivers/net/ethernet/realtek/Makefile       1
-rw-r--r--  drivers/net/ethernet/realtek/atp.c         63
-rw-r--r--  drivers/net/ethernet/realtek/atp.h          2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c     3662
-rw-r--r--  drivers/net/ethernet/realtek/sc92031.c   1609
8 files changed, 2655 insertions(+), 3212 deletions(-)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index ee5da9293ce..2bc728e65e2 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -431,7 +431,7 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, cp->dev->stats.rx_bytes += skb->len; if (opts2 & RxVlanTagged) - __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); napi_gro_receive(&cp->napi, skb); } @@ -476,9 +476,9 @@ rx_status_loop: rx = 0; cpw16(IntrStatus, cp_rx_intr_mask); - while (1) { + while (rx < budget) { u32 status, len; - dma_addr_t mapping; + dma_addr_t mapping, new_mapping; struct sk_buff *skb, *new_skb; struct cp_desc *desc; const unsigned buflen = cp->rx_buf_sz; @@ -520,6 +520,14 @@ rx_status_loop: goto rx_next; } + new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { + dev->stats.rx_dropped++; + kfree_skb(new_skb); + goto rx_next; + } + dma_unmap_single(&cp->pdev->dev, mapping, buflen, PCI_DMA_FROMDEVICE); @@ -531,12 +539,11 @@ rx_status_loop: skb_put(skb, len); - mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, - PCI_DMA_FROMDEVICE); cp->rx_skb[rx_tail] = new_skb; cp_rx_skb(cp, skb, desc); rx++; + mapping = new_mapping; rx_next: cp->rx_ring[rx_tail].opts2 = 0; @@ -547,9 +554,6 @@ rx_next: else desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); rx_tail = NEXT_RX(rx_tail); - - if (rx >= budget) - break; } cp->rx_tail = rx_tail; @@ -563,6 +567,7 @@ rx_next: if (cpr16(IntrStatus) & cp_rx_intr_mask) goto rx_status_loop; + napi_gro_flush(napi, false); spin_lock_irqsave(&cp->lock, flags); __napi_complete(napi); cpw16_f(IntrMask, cp_intr_mask); @@ -576,28 +581,30 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct cp_private *cp; + int handled = 0; u16 status; if (unlikely(dev == NULL)) return IRQ_NONE; cp = netdev_priv(dev); + spin_lock(&cp->lock); + status = cpr16(IntrStatus); if (!status || (status == 0xFFFF)) - return IRQ_NONE; + goto out_unlock; + + handled = 1; netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n", status, cpr8(Cmd), cpr16(CpCmd)); cpw16(IntrStatus, status & ~cp_rx_intr_mask); - spin_lock(&cp->lock); - /* close possible race's with dev_close */ if (unlikely(!netif_running(dev))) { cpw16(IntrMask, 0); - spin_unlock(&cp->lock); - return IRQ_HANDLED; + goto out_unlock; } if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) @@ -611,7 +618,6 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) if (status & LinkChg) mii_check_media(&cp->mii_if, netif_msg_link(cp), false); - spin_unlock(&cp->lock); if (status & PciErr) { u16 pci_status; @@ -624,7 +630,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) /* TODO: reset hardware */ } - return IRQ_HANDLED; +out_unlock: + spin_unlock(&cp->lock); + + return IRQ_RETVAL(handled); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -634,9 +643,12 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) */ static void cp_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - cp_interrupt(dev->irq, dev); - enable_irq(dev->irq); + struct cp_private *cp = netdev_priv(dev); + const int irq = cp->pdev->irq; + + disable_irq(irq); + cp_interrupt(irq, dev); + enable_irq(irq); } #endif @@ -644,6 +656,7 @@ static void cp_tx (struct cp_private *cp) { unsigned tx_head = cp->tx_head; 
unsigned tx_tail = cp->tx_tail; + unsigned bytes_compl = 0, pkts_compl = 0; while (tx_tail != tx_head) { struct cp_desc *txd = cp->tx_ring + tx_tail; @@ -683,6 +696,8 @@ static void cp_tx (struct cp_private *cp) netif_dbg(cp, tx_done, cp->dev, "tx done, slot %d\n", tx_tail); } + bytes_compl += skb->len; + pkts_compl++; dev_kfree_skb_irq(skb); } @@ -693,6 +708,7 @@ static void cp_tx (struct cp_private *cp) cp->tx_tail = tx_tail; + netdev_completed_queue(cp->dev, pkts_compl, bytes_compl); if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) netif_wake_queue(cp->dev); } @@ -703,6 +719,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb) TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; } +static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, + int first, int entry_last) +{ + int frag, index; + struct cp_desc *txd; + skb_frag_t *this_frag; + for (frag = 0; frag+first < entry_last; frag++) { + index = first+frag; + cp->tx_skb[index] = NULL; + txd = &cp->tx_ring[index]; + this_frag = &skb_shinfo(skb)->frags[frag]; + dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), + skb_frag_size(this_frag), PCI_DMA_TODEVICE); + } +} + static netdev_tx_t cp_start_xmit (struct sk_buff *skb, struct net_device *dev) { @@ -736,6 +768,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, len = skb->len; mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&cp->pdev->dev, mapping)) + goto out_dma_error; + txd->opts2 = opts2; txd->addr = cpu_to_le64(mapping); wmb(); @@ -773,6 +808,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, first_len = skb_headlen(skb); first_mapping = dma_map_single(&cp->pdev->dev, skb->data, first_len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&cp->pdev->dev, first_mapping)) + goto out_dma_error; + cp->tx_skb[entry] = skb; entry = NEXT_TX(entry); @@ -786,6 +824,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, mapping = dma_map_single(&cp->pdev->dev, skb_frag_address(this_frag), len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&cp->pdev->dev, mapping)) { + unwind_tx_frag_mapping(cp, skb, first_entry, entry); + goto out_dma_error; + } + eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; ctrl = eor | len | DescOwn; @@ -839,16 +882,23 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, wmb(); } cp->tx_head = entry; + + netdev_sent_queue(dev, skb->len); netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", entry, skb->len); if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) netif_stop_queue(dev); +out_unlock: spin_unlock_irqrestore(&cp->lock, intr_flags); cpw8(TxPoll, NormalTxPoll); return NETDEV_TX_OK; +out_dma_error: + dev_kfree_skb_any(skb); + cp->dev->stats.tx_dropped++; + goto out_unlock; } /* Set or clear the multicast filter for this adaptor. @@ -859,7 +909,6 @@ static void __cp_set_rx_mode (struct net_device *dev) struct cp_private *cp = netdev_priv(dev); u32 mc_filter[2]; /* Multicast hash filter */ int rx_mode; - u32 tmp; /* Note: do not reorder, GCC is clever about common statements. */ if (dev->flags & IFF_PROMISC) { @@ -886,11 +935,9 @@ static void __cp_set_rx_mode (struct net_device *dev) } /* We can safely update without stopping the chip. 
*/ - tmp = cp_rx_config | rx_mode; - if (cp->rx_config != tmp) { - cpw32_f (RxConfig, tmp); - cp->rx_config = tmp; - } + cp->rx_config = cp_rx_config | rx_mode; + cpw32_f(RxConfig, cp->rx_config); + cpw32_f (MAR0 + 0, mc_filter[0]); cpw32_f (MAR0 + 4, mc_filter[1]); } @@ -936,6 +983,8 @@ static void cp_stop_hw (struct cp_private *cp) cp->rx_tail = 0; cp->tx_head = cp->tx_tail = 0; + + netdev_reset_queue(cp->dev); } static void cp_reset_hw (struct cp_private *cp) @@ -956,14 +1005,48 @@ static void cp_reset_hw (struct cp_private *cp) static inline void cp_start_hw (struct cp_private *cp) { + dma_addr_t ring_dma; + cpw16(CpCmd, cp->cpcmd); + + /* + * These (at least TxRingAddr) need to be configured after the + * corresponding bits in CpCmd are enabled. Datasheet v1.6 ยง6.33 + * (C+ Command Register) recommends that these and more be configured + * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware + * it's been observed that the TxRingAddr is actually reset to garbage + * when C+ mode Tx is enabled in CpCmd. + */ + cpw32_f(HiTxRingAddr, 0); + cpw32_f(HiTxRingAddr + 4, 0); + + ring_dma = cp->ring_dma; + cpw32_f(RxRingAddr, ring_dma & 0xffffffff); + cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); + + ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; + cpw32_f(TxRingAddr, ring_dma & 0xffffffff); + cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); + + /* + * Strictly speaking, the datasheet says this should be enabled + * *before* setting the descriptor addresses. But what, then, would + * prevent it from doing DMA to random unconfigured addresses? + * This variant appears to work fine. + */ cpw8(Cmd, RxOn | TxOn); + + netdev_reset_queue(cp->dev); +} + +static void cp_enable_irq(struct cp_private *cp) +{ + cpw16_f(IntrMask, cp_intr_mask); } static void cp_init_hw (struct cp_private *cp) { struct net_device *dev = cp->dev; - dma_addr_t ring_dma; cp_reset_hw(cp); @@ -986,21 +1069,8 @@ static void cp_init_hw (struct cp_private *cp) cpw8(Config5, cpr8(Config5) & PMEStatus); - cpw32_f(HiTxRingAddr, 0); - cpw32_f(HiTxRingAddr + 4, 0); - - ring_dma = cp->ring_dma; - cpw32_f(RxRingAddr, ring_dma & 0xffffffff); - cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); - - ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; - cpw32_f(TxRingAddr, ring_dma & 0xffffffff); - cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); - cpw16(MultiIntr, 0); - cpw16_f(IntrMask, cp_intr_mask); - cpw8_f(Cfg9346, Cfg9346_Lock); } @@ -1019,6 +1089,10 @@ static int cp_refill_rx(struct cp_private *cp) mapping = dma_map_single(&cp->pdev->dev, skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&cp->pdev->dev, mapping)) { + kfree_skb(skb); + goto err_out; + } cp->rx_skb[i] = skb; cp->rx_ring[i].opts2 = 0; @@ -1056,17 +1130,22 @@ static int cp_init_rings (struct cp_private *cp) static int cp_alloc_rings (struct cp_private *cp) { + struct device *d = &cp->pdev->dev; void *mem; + int rc; - mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES, - &cp->ring_dma, GFP_KERNEL); + mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL); if (!mem) return -ENOMEM; cp->rx_ring = mem; cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; - return cp_init_rings(cp); + rc = cp_init_rings(cp); + if (rc < 0) + dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); + + return rc; } static void cp_clean_rings (struct cp_private *cp) @@ -1096,6 +1175,7 @@ static void cp_clean_rings (struct cp_private *cp) cp->dev->stats.tx_dropped++; } } + netdev_reset_queue(cp->dev); memset(cp->rx_ring, 0, 
sizeof(struct cp_desc) * CP_RX_RING_SIZE); memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); @@ -1116,6 +1196,7 @@ static void cp_free_rings (struct cp_private *cp) static int cp_open (struct net_device *dev) { struct cp_private *cp = netdev_priv(dev); + const int irq = cp->pdev->irq; int rc; netif_dbg(cp, ifup, dev, "enabling interface\n"); @@ -1128,10 +1209,12 @@ static int cp_open (struct net_device *dev) cp_init_hw(cp); - rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); + rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev); if (rc) goto err_out_hw; + cp_enable_irq(cp); + netif_carrier_off(dev); mii_check_media(&cp->mii_if, netif_msg_link(cp), true); netif_start_queue(dev); @@ -1163,7 +1246,7 @@ static int cp_close (struct net_device *dev) spin_unlock_irqrestore(&cp->lock, flags); - free_irq(dev->irq, dev); + free_irq(cp->pdev->irq, dev); cp_free_rings(cp); return 0; @@ -1185,18 +1268,16 @@ static void cp_tx_timeout(struct net_device *dev) cp_clean_rings(cp); rc = cp_init_rings(cp); cp_start_hw(cp); + cp_enable_irq(cp); netif_wake_queue(dev); spin_unlock_irqrestore(&cp->lock, flags); } -#ifdef BROKEN static int cp_change_mtu(struct net_device *dev, int new_mtu) { struct cp_private *cp = netdev_priv(dev); - int rc; - unsigned long flags; /* check for invalid MTU, according to hardware limits */ if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU) @@ -1209,22 +1290,12 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu) return 0; } - spin_lock_irqsave(&cp->lock, flags); - - cp_stop_hw(cp); /* stop h/w and free rings */ - cp_clean_rings(cp); - + /* network IS up, close it, reset MTU, and come up again. */ + cp_close(dev); dev->mtu = new_mtu; - cp_set_rxbufsize(cp); /* set new rx buf size */ - - rc = cp_init_rings(cp); /* realloc and restart h/w */ - cp_start_hw(cp); - - spin_unlock_irqrestore(&cp->lock, flags); - - return rc; + cp_set_rxbufsize(cp); + return cp_open(dev); } -#endif /* BROKEN */ static const char mii_2_8139_map[8] = { BasicModeCtrl, @@ -1319,9 +1390,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info { struct cp_private *cp = netdev_priv(dev); - strcpy (info->driver, DRV_NAME); - strcpy (info->version, DRV_VERSION); - strcpy (info->bus_info, pci_name(cp->pdev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); } static void cp_get_ringparam(struct net_device *dev, @@ -1392,7 +1463,7 @@ static void cp_set_msglevel(struct net_device *dev, u32 value) cp->msg_enable = value; } -static int cp_set_features(struct net_device *dev, u32 features) +static int cp_set_features(struct net_device *dev, netdev_features_t features) { struct cp_private *cp = netdev_priv(dev); unsigned long flags; @@ -1407,7 +1478,7 @@ static int cp_set_features(struct net_device *dev, u32 features) else cp->cpcmd &= ~RxChkSum; - if (features & NETIF_F_HW_VLAN_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) cp->cpcmd |= RxVlanOn; else cp->cpcmd &= ~RxVlanOn; @@ -1589,7 +1660,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p) No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. */ -#define eeprom_delay() readl(ee_addr) +#define eeprom_delay() readb(ee_addr) /* The EEPROM commands include the alway-set leading bit. 
*/ #define EE_EXTEND_CMD (4) @@ -1629,7 +1700,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len) static void eeprom_cmd_end(void __iomem *ee_addr) { - writeb (~EE_CS, ee_addr); + writeb(0, ee_addr); eeprom_delay (); } @@ -1785,7 +1856,7 @@ static int cp_set_eeprom(struct net_device *dev, /* Put the board into D3cold state and wait for WakeUp signal */ static void cp_set_d3_state (struct cp_private *cp) { - pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */ + pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */ pci_set_power_state (cp->pdev, PCI_D3hot); } @@ -1800,9 +1871,7 @@ static const struct net_device_ops cp_netdev_ops = { .ndo_start_xmit = cp_start_xmit, .ndo_tx_timeout = cp_tx_timeout, .ndo_set_features = cp_set_features, -#ifdef BROKEN .ndo_change_mtu = cp_change_mtu, -#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cp_poll_controller, @@ -1911,7 +1980,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) (unsigned long long)pciaddr); goto err_out_res; } - dev->base_addr = (unsigned long) regs; cp->regs = regs; cp_stop_hw(cp); @@ -1921,32 +1989,29 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < 3; i++) ((__le16 *) (dev->dev_addr))[i] = cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); dev->netdev_ops = &cp_netdev_ops; netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); dev->ethtool_ops = &cp_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; /* disabled by default until verified */ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; - dev->irq = pdev->irq; - rc = register_netdev(dev); if (rc) goto err_out_iomap; - netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n", - dev->base_addr, dev->dev_addr, dev->irq); + netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n", + regs, dev->dev_addr, pdev->irq); pci_set_drvdata(pdev, dev); @@ -1983,7 +2048,6 @@ static void cp_remove_one (struct pci_dev *pdev) pci_release_regions(pdev); pci_clear_mwi(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); free_netdev(dev); } @@ -2033,6 +2097,7 @@ static int cp_resume (struct pci_dev *pdev) /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ cp_init_rings_index (cp); cp_init_hw (cp); + cp_enable_irq(cp); netif_start_queue (dev); spin_lock_irqsave (&cp->lock, flags); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 4d6b254fc6c..2e5df148af4 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -148,9 +148,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* Whether to use MMIO or PIO. Default to MMIO. */ #ifdef CONFIG_8139TOO_PIO -static int use_io = 1; +static bool use_io = true; #else -static int use_io = 0; +static bool use_io = false; #endif /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 
@@ -228,7 +228,7 @@ typedef enum { static const struct { const char *name; u32 hw_flags; -} board_info[] __devinitdata = { +} board_info[] = { { "RealTek RTL8139", RTL8139_CAPS }, { "RealTek RTL8129", RTL8129_CAPS }, }; @@ -565,6 +565,12 @@ struct rtl_extra_stats { unsigned long rx_lost_in_ring; }; +struct rtl8139_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + struct rtl8139_private { void __iomem *mmio_addr; int drv_flags; @@ -575,11 +581,13 @@ struct rtl8139_private { unsigned char *rx_ring; unsigned int cur_rx; /* RX buf index of next pkt */ + struct rtl8139_stats rx_stats; dma_addr_t rx_ring_dma; unsigned int tx_flag; unsigned long cur_tx; unsigned long dirty_tx; + struct rtl8139_stats tx_stats; unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ unsigned char *tx_bufs; /* Tx bounce buffer region. */ dma_addr_t tx_bufs_dma; @@ -612,7 +620,7 @@ MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); -module_param(use_io, int, 0); +module_param(use_io, bool, 0); MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO"); module_param(multicast_filter_limit, int, 0); module_param_array(media, int, NULL, 0); @@ -641,7 +649,9 @@ static int rtl8139_poll(struct napi_struct *napi, int budget); static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); static int rtl8139_close (struct net_device *dev); static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); -static struct net_device_stats *rtl8139_get_stats (struct net_device *dev); +static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 + *stats); static void rtl8139_set_rx_mode (struct net_device *dev); static void __set_rx_mode (struct net_device *dev); static void rtl8139_hw_start (struct net_device *dev); @@ -717,7 +727,6 @@ static void __rtl8139_cleanup_dev (struct net_device *dev) pci_release_regions (pdev); free_netdev(dev); - pci_set_drvdata (pdev, NULL); } @@ -738,26 +747,32 @@ static void rtl8139_chip_reset (void __iomem *ioaddr) } -static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) +static struct net_device *rtl8139_init_board(struct pci_dev *pdev) { + struct device *d = &pdev->dev; void __iomem *ioaddr; struct net_device *dev; struct rtl8139_private *tp; u8 tmp8; int rc, disable_dev_on_err = 0; - unsigned int i; - unsigned long pio_start, pio_end, pio_flags, pio_len; - unsigned long mmio_start, mmio_end, mmio_flags, mmio_len; + unsigned int i, bar; + unsigned long io_len; u32 version; + static const struct { + unsigned long mask; + char *type; + } res[] = { + { IORESOURCE_IO, "PIO" }, + { IORESOURCE_MEM, "MMIO" } + }; assert (pdev != NULL); /* dev and priv zeroed in alloc_etherdev */ dev = alloc_etherdev (sizeof (*tp)); - if (dev == NULL) { - dev_err(&pdev->dev, "Unable to alloc new net device\n"); + if (dev == NULL) return ERR_PTR(-ENOMEM); - } + SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); @@ -768,78 +783,48 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) if (rc) goto err_out; - pio_start = pci_resource_start (pdev, 0); - pio_end = pci_resource_end (pdev, 0); - pio_flags = pci_resource_flags (pdev, 0); - pio_len = pci_resource_len (pdev, 0); - - mmio_start = pci_resource_start (pdev, 1); - mmio_end = pci_resource_end (pdev, 1); - mmio_flags = pci_resource_flags (pdev, 1); - mmio_len = pci_resource_len (pdev, 1); - - /* set this immediately, we need to know before - * we talk to 
the chip directly */ - pr_debug("PIO region size == 0x%02lX\n", pio_len); - pr_debug("MMIO region size == 0x%02lX\n", mmio_len); - -retry: - if (use_io) { - /* make sure PCI base addr 0 is PIO */ - if (!(pio_flags & IORESOURCE_IO)) { - dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n"); - rc = -ENODEV; - goto err_out; - } - /* check for weird/broken PCI region reporting */ - if (pio_len < RTL_MIN_IO_SIZE) { - dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n"); - rc = -ENODEV; - goto err_out; - } - } else { - /* make sure PCI base addr 1 is MMIO */ - if (!(mmio_flags & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n"); - rc = -ENODEV; - goto err_out; - } - if (mmio_len < RTL_MIN_IO_SIZE) { - dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n"); - rc = -ENODEV; - goto err_out; - } - } - rc = pci_request_regions (pdev, DRV_NAME); if (rc) goto err_out; disable_dev_on_err = 1; - /* enable PCI bus-mastering */ pci_set_master (pdev); - if (use_io) { - ioaddr = pci_iomap(pdev, 0, 0); - if (!ioaddr) { - dev_err(&pdev->dev, "cannot map PIO, aborting\n"); - rc = -EIO; - goto err_out; - } - dev->base_addr = pio_start; - tp->regs_len = pio_len; - } else { - /* ioremap MMIO region */ - ioaddr = pci_iomap(pdev, 1, 0); - if (ioaddr == NULL) { - dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n"); - pci_release_regions(pdev); - use_io = 1; + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + +retry: + /* PIO bar register comes first. */ + bar = !use_io; + + io_len = pci_resource_len(pdev, bar); + + dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len); + + if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) { + dev_err(d, "region #%d not a %s resource, aborting\n", bar, + res[bar].type); + rc = -ENODEV; + goto err_out; + } + if (io_len < RTL_MIN_IO_SIZE) { + dev_err(d, "Invalid PCI %s region size(s), aborting\n", + res[bar].type); + rc = -ENODEV; + goto err_out; + } + + ioaddr = pci_iomap(pdev, bar, 0); + if (!ioaddr) { + dev_err(d, "cannot map %s\n", res[bar].type); + if (!use_io) { + use_io = true; goto retry; } - dev->base_addr = (long) ioaddr; - tp->regs_len = mmio_len; + rc = -ENODEV; + goto err_out; } + tp->regs_len = io_len; tp->mmio_addr = ioaddr; /* Bring old chips out of low-power mode. 
*/ @@ -908,10 +893,37 @@ err_out: return ERR_PTR(rc); } +static int rtl8139_set_features(struct net_device *dev, netdev_features_t features) +{ + struct rtl8139_private *tp = netdev_priv(dev); + unsigned long flags; + netdev_features_t changed = features ^ dev->features; + void __iomem *ioaddr = tp->mmio_addr; + + if (!(changed & (NETIF_F_RXALL))) + return 0; + + spin_lock_irqsave(&tp->lock, flags); + + if (changed & NETIF_F_RXALL) { + int rx_mode = tp->rx_config; + if (features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + else + rx_mode &= ~(AcceptErr | AcceptRunt); + tp->rx_config = rtl8139_rx_config | rx_mode; + RTL_W32_F(RxConfig, tp->rx_config); + } + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + static const struct net_device_ops rtl8139_netdev_ops = { .ndo_open = rtl8139_open, .ndo_stop = rtl8139_close, - .ndo_get_stats = rtl8139_get_stats, + .ndo_get_stats64 = rtl8139_get_stats64, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = rtl8139_set_mac_address, @@ -922,10 +934,11 @@ static const struct net_device_ops rtl8139_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = rtl8139_poll_controller, #endif + .ndo_set_features = rtl8139_set_features, }; -static int __devinit rtl8139_init_one (struct pci_dev *pdev, - const struct pci_device_id *ent) +static int rtl8139_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { struct net_device *dev = NULL; struct rtl8139_private *tp; @@ -980,7 +993,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, for (i = 0; i < 3; i++) ((__le16 *) (dev->dev_addr))[i] = cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len)); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* The Rtl8139-specific entries in the device structure. */ dev->netdev_ops = &rtl8139_netdev_ops; @@ -995,7 +1007,8 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; dev->vlan_features = dev->features; - dev->irq = pdev->irq; + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; /* tp zeroed and aligned in alloc_etherdev */ tp = netdev_priv(dev); @@ -1022,9 +1035,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, pci_set_drvdata (pdev, dev); - netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", + netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n", board_info[ent->driver_data].name, - dev->base_addr, dev->dev_addr, dev->irq); + ioaddr, dev->dev_addr, pdev->irq); netdev_dbg(dev, "Identified 8139 chip type '%s'\n", rtl_chip_info[tp->chipset].name); @@ -1091,7 +1104,7 @@ err_out: } -static void __devexit rtl8139_remove_one (struct pci_dev *pdev) +static void rtl8139_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); struct rtl8139_private *tp = netdev_priv(dev); @@ -1122,14 +1135,14 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev) No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. */ -#define eeprom_delay() (void)RTL_R32(Cfg9346) +#define eeprom_delay() (void)RTL_R8(Cfg9346) /* The EEPROM commands include the alway-set leading bit. 
*/ #define EE_WRITE_CMD (5) #define EE_READ_CMD (6) #define EE_ERASE_CMD (7) -static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_len) +static int read_eeprom(void __iomem *ioaddr, int location, int addr_len) { int i; unsigned retval = 0; @@ -1161,7 +1174,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l } /* Terminate the EEPROM access. */ - RTL_W8 (Cfg9346, ~EE_CS); + RTL_W8(Cfg9346, 0); eeprom_delay (); return retval; @@ -1299,10 +1312,11 @@ static void mdio_write (struct net_device *dev, int phy_id, int location, static int rtl8139_open (struct net_device *dev) { struct rtl8139_private *tp = netdev_priv(dev); - int retval; void __iomem *ioaddr = tp->mmio_addr; + const int irq = tp->pci_dev->irq; + int retval; - retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); + retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); if (retval) return retval; @@ -1311,7 +1325,7 @@ static int rtl8139_open (struct net_device *dev) tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, &tp->rx_ring_dma, GFP_KERNEL); if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { - free_irq(dev->irq, dev); + free_irq(irq, dev); if (tp->tx_bufs) dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, @@ -1337,7 +1351,7 @@ static int rtl8139_open (struct net_device *dev) "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n", __func__, (unsigned long long)pci_resource_start (tp->pci_dev, 1), - dev->irq, RTL_R8 (MediaStatus), + irq, RTL_R8 (MediaStatus), tp->mii.full_duplex ? "full" : "half"); rtl8139_start_thread(tp); @@ -1703,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, if (len < ETH_ZLEN) memset(tp->tx_buf[entry], 0, ETH_ZLEN); skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); } else { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -1777,8 +1791,10 @@ static void rtl8139_tx_interrupt (struct net_device *dev, dev->stats.tx_fifo_errors++; } dev->stats.collisions += (txstatus >> 24) & 15; - dev->stats.tx_bytes += txstatus & 0x7ff; - dev->stats.tx_packets++; + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += txstatus & 0x7ff; + u64_stats_update_end(&tp->tx_stats.syncp); } dirty_tx++; @@ -1941,7 +1957,10 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, /* read size+status of next frame from DMA ring buffer */ rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset)); rx_size = rx_status >> 16; - pkt_size = rx_size - 4; + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = rx_size - 4; + else + pkt_size = rx_size; netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", __func__, rx_status, rx_size, cur_rx); @@ -1979,11 +1998,30 @@ no_early_rx: if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) || (rx_size < 8) || (!(rx_status & RxStatusOK)))) { + if ((dev->features & NETIF_F_RXALL) && + (rx_size <= (MAX_ETH_FRAME_SIZE + 4)) && + (rx_size >= 8) && + (!(rx_status & RxStatusOK))) { + /* Length is at least mostly OK, but pkt has + * error. I'm hoping we can handle some of these + * errors without resetting the chip. 
--Ben + */ + dev->stats.rx_errors++; + if (rx_status & RxCRCErr) { + dev->stats.rx_crc_errors++; + goto keep_pkt; + } + if (rx_status & RxRunt) { + dev->stats.rx_length_errors++; + goto keep_pkt; + } + } rtl8139_rx_err (rx_status, dev, tp, ioaddr); received = -1; goto out; } +keep_pkt: /* Malloc up new buffer, compatible with net-2e. */ /* Omit the four octet CRC from the length. */ @@ -1998,13 +2036,13 @@ no_early_rx: skb->protocol = eth_type_trans (skb, dev); - dev->stats.rx_bytes += pkt_size; - dev->stats.rx_packets++; + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); netif_receive_skb (skb); } else { - if (net_ratelimit()) - netdev_warn(dev, "Memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; } received++; @@ -2174,9 +2212,12 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) */ static void rtl8139_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - rtl8139_interrupt(dev->irq, dev); - enable_irq(dev->irq); + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + + disable_irq(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); } #endif @@ -2229,7 +2270,7 @@ static int rtl8139_close (struct net_device *dev) spin_unlock_irqrestore (&tp->lock, flags); - free_irq (dev->irq, dev); + free_irq(tp->pci_dev->irq, dev); rtl8139_tx_clear (tp); @@ -2330,9 +2371,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct rtl8139_private *tp = netdev_priv(dev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(tp->pci_dev)); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); info->regdump_len = tp->regs_len; } @@ -2463,11 +2504,13 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } -static struct net_device_stats *rtl8139_get_stats (struct net_device *dev) +static struct rtnl_link_stats64 * +rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; + unsigned int start; if (netif_running(dev)) { spin_lock_irqsave (&tp->lock, flags); @@ -2476,7 +2519,21 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev) spin_unlock_irqrestore (&tp->lock, flags); } - return &dev->stats; + netdev_stats_to_stats64(stats, &dev->stats); + + do { + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); + + return stats; } /* Set or clear the multicast filter for this adaptor. @@ -2516,6 +2573,9 @@ static void __set_rx_mode (struct net_device *dev) } } + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + /* We can safely update without stopping the chip. 
*/ tmp = rtl8139_rx_config | rx_mode; if (tp->rx_config != tmp) { @@ -2591,7 +2651,7 @@ static struct pci_driver rtl8139_pci_driver = { .name = DRV_NAME, .id_table = rtl8139_pci_tbl, .probe = rtl8139_init_one, - .remove = __devexit_p(rtl8139_remove_one), + .remove = rtl8139_remove_one, #ifdef CONFIG_PM .suspend = rtl8139_suspend, .resume = rtl8139_resume, diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 84083ec6e61..ae5d027096e 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -24,20 +24,19 @@ config ATP select CRC32 ---help--- This is a network (Ethernet) device which attaches to your parallel - port. Read <file:drivers/net/atp.c> as well as the Ethernet-HOWTO, - available from <http://www.tldp.org/docs.html#howto>, if you - want to use this. If you intend to use this driver, you should have - said N to the "Parallel printer support", because the two drivers - don't like each other. + port. Read <file:drivers/net/ethernet/realtek/atp.c> as well as the + Ethernet-HOWTO, available from <http://www.tldp.org/docs.html#howto>, + if you want to use this. If you intend to use this driver, you + should have said N to the "Parallel printer support", because the two + drivers don't like each other. To compile this driver as a module, choose M here: the module will be called atp. config 8139CP - tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL + tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support" + depends on PCI select CRC32 - select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -52,7 +51,6 @@ config 8139TOO tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -107,7 +105,6 @@ config R8169 depends on PCI select FW_LOADER select CRC32 - select NET_CORE select MII ---help--- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. @@ -115,16 +112,4 @@ config R8169 To compile this driver as a module, choose M here: the module will be called r8169. This is recommended. -config SC92031 - tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL - select CRC32 - ---help--- - This is a driver for the Fast Ethernet PCI network cards based on - the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you - have one of these, say Y here. - - To compile this driver as a module, choose M here: the module - will be called sc92031. This is recommended. - endif # NET_VENDOR_REALTEK diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile index e48cfb6ac42..71b1da30ecb 100644 --- a/drivers/net/ethernet/realtek/Makefile +++ b/drivers/net/ethernet/realtek/Makefile @@ -6,4 +6,3 @@ obj-$(CONFIG_8139CP) += 8139cp.o obj-$(CONFIG_8139TOO) += 8139too.o obj-$(CONFIG_ATP) += atp.o obj-$(CONFIG_R8169) += r8169.o -obj-$(CONFIG_SC92031) += sc92031.o diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index e3f57fdbf0e..d77d60ea820 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -140,7 +140,6 @@ static int xcvr[NUM_UNITS]; /* The data transfer mode. 
*/ #include <linux/delay.h> #include <linux/bitops.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> @@ -176,8 +175,7 @@ struct net_local { unsigned int tx_unit_busy:1; unsigned char re_tx, /* Number of packet retransmissions. */ addr_mode, /* Current Rx filter e.g. promiscuous, etc. */ - pac_cnt_in_tx_buf, - chip_type; + pac_cnt_in_tx_buf; }; /* This code, written by wwc@super.org, resets the adapter every @@ -340,7 +338,6 @@ static int __init atp_probe1(long ioaddr) write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); lp = netdev_priv(dev); - lp->chip_type = RTL8002; lp->addr_mode = CMR2h_Normal; spin_lock_init(&lp->lock); @@ -783,10 +780,8 @@ static void net_rx(struct net_device *dev) int pkt_len = (rx_head.rx_count & 0x7ff) - 4; struct sk_buff *skb; - skb = dev_alloc_skb(pkt_len + 2); + skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", - dev->name); dev->stats.rx_dropped++; goto done; } @@ -853,7 +848,7 @@ net_close(struct net_device *dev) * Set or clear the multicast filter for this adapter. */ -static void set_rx_mode_8002(struct net_device *dev) +static void set_rx_mode(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); long ioaddr = dev->base_addr; @@ -865,58 +860,6 @@ static void set_rx_mode_8002(struct net_device *dev) write_reg_high(ioaddr, CMR2, lp->addr_mode); } -static void set_rx_mode_8012(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - long ioaddr = dev->base_addr; - unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */ - int i; - - if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ - new_mode = CMR2h_PROMISC; - } else if ((netdev_mc_count(dev) > 1000) || - (dev->flags & IFF_ALLMULTI)) { - /* Too many to filter perfectly -- accept all multicasts. */ - memset(mc_filter, 0xff, sizeof(mc_filter)); - new_mode = CMR2h_Normal; - } else { - struct netdev_hw_addr *ha; - - memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(ha, dev) { - int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; - mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); - } - new_mode = CMR2h_Normal; - } - lp->addr_mode = new_mode; - write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */ - for (i = 0; i < 8; i++) - write_reg_byte(ioaddr, i, mc_filter[i]); - if (net_debug > 2 || 1) { - lp->addr_mode = 1; - printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to", - dev->name, lp->addr_mode); - for (i = 0; i < 8; i++) - printk(" %2.2x", mc_filter[i]); - printk(".\n"); - } - - write_reg_high(ioaddr, CMR2, lp->addr_mode); - write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */ -} - -static void set_rx_mode(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - if (lp->chip_type == RTL8002) - return set_rx_mode_8002(dev); - else - return set_rx_mode_8012(dev); -} - - static int __init atp_init_module(void) { if (debug) /* Emit version even if no cards detected. 
*/ printk(KERN_INFO "%s", version); diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h index 0edc642c2c2..040b1373994 100644 --- a/drivers/net/ethernet/realtek/atp.h +++ b/drivers/net/ethernet/realtek/atp.h @@ -16,8 +16,6 @@ struct rx_header { #define PAR_STATUS 1 #define PAR_CONTROL 2 -enum chip_type { RTL8002, RTL8012 }; - #define Ctrl_LNibRead 0x08 /* LP_PSELECP */ #define Ctrl_HNibRead 0 #define Ctrl_LNibWrite 0x08 /* LP_PSELECP */ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 92b45f08858..61623e9af57 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -21,7 +21,6 @@ #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> @@ -29,7 +28,6 @@ #include <linux/pci-aspm.h> #include <linux/prefetch.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> @@ -45,6 +43,13 @@ #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" #ifdef RTL8169_DEBUG #define assert(expr) \ @@ -62,36 +67,31 @@ #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) -#define TX_BUFFS_AVAIL(tp) \ - (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) +#define TX_SLOTS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) + +/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ +#define TX_FRAGS_READY_FOR(tp,nr_frags) \ + (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; -/* MAC address length */ -#define MAC_ADDR_LEN 6 - #define MAX_READ_REQUEST_SHIFT 12 -#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ -#define SafeMtu 0x1c20 /* ... 
actually life sucks beyond ~7k */ +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ #define R8169_REGS_SIZE 256 #define R8169_NAPI_WEIGHT 64 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ -#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ -#define RX_BUF_SIZE 1536 /* Rx Buffer size */ +#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) #define RTL8169_TX_TIMEOUT (6*HZ) #define RTL8169_PHY_TIMEOUT (10*HZ) -#define RTL_EEPROM_SIG cpu_to_le32(0x8129) -#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff) -#define RTL_EEPROM_SIG_ADDR 0x0000 - /* write/read MMIO register */ #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) @@ -137,6 +137,14 @@ enum mac_version { RTL_GIGA_MAC_VER_34, RTL_GIGA_MAC_VER_35, RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, RTL_GIGA_MAC_NONE = 0xff, }; @@ -201,7 +209,7 @@ static const struct { [RTL_GIGA_MAC_VER_16] = _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_17] = - _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), [RTL_GIGA_MAC_VER_18] = _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_19] = @@ -249,6 +257,29 @@ static const struct { [RTL_GIGA_MAC_VER_36] = _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_37] = + _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_38] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_39] = + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_40] = + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_41] = + _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_42] = + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_43] = + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_44] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, + JUMBO_9K, false), }; #undef _R @@ -258,16 +289,14 @@ enum cfg_version { RTL_CFG_2 }; -static void rtl_hw_start_8169(struct net_device *); -static void rtl_hw_start_8168(struct net_device *); -static void rtl_hw_start_8101(struct net_device *); - static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, @@ -314,6 +343,7 @@ enum rtl_registers { #define RXCFG_FIFO_SHIFT 13 /* No threshold before first PCI xfer */ #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) #define 
RXCFG_DMA_SHIFT 8 /* Unlimited maximum PCI burst. */ #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) @@ -323,6 +353,8 @@ enum rtl_registers { Config0 = 0x51, Config1 = 0x52, Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + Config3 = 0x54, Config4 = 0x55, Config5 = 0x56, @@ -363,6 +395,10 @@ enum rtl8168_8101_registers { #define CSIAR_BYTE_ENABLE 0x0f #define CSIAR_BYTE_ENABLE_SHIFT 12 #define CSIAR_ADDR_MASK 0x0fff +#define CSIAR_FUNC_CARD 0x00000000 +#define CSIAR_FUNC_SDIO 0x00010000 +#define CSIAR_FUNC_NIC 0x00020000 +#define CSIAR_FUNC_NIC2 0x00010000 PMCH = 0x6f, EPHYAR = 0x80, #define EPHYAR_FLAG 0x80000000 @@ -378,8 +414,12 @@ enum rtl8168_8101_registers { TWSI = 0xd2, MCU = 0xd3, #define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) #define EN_NDP (1 << 3) #define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) EFUSEAR = 0xdc, #define EFUSEAR_FLAG 0x80000000 #define EFUSEAR_WRITE_CMD 0x80000000 @@ -405,6 +445,7 @@ enum rtl8168_registers { #define ERIAR_MASK_SHIFT 12 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) EPHY_RXER_NUM = 0x7c, OCPDR = 0xb0, /* OCP GPHY access */ @@ -417,10 +458,14 @@ enum rtl8168_registers { #define OCPAR_FLAG 0x80000000 #define OCPAR_GPHY_WRITE_CMD 0x8000f060 #define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ MISC = 0xf0, /* 8168e only. */ #define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ #define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) }; enum rtl_register_content { @@ -477,7 +522,6 @@ enum rtl_register_content { /* Config1 register p.24 */ LEDS1 = (1 << 7), LEDS0 = (1 << 6), - MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ Speed_down = (1 << 4), MEMMAP = (1 << 3), IOMAP = (1 << 2), @@ -485,6 +529,8 @@ enum rtl_register_content { PMEnable = (1 << 0), /* Power Management Enable */ /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ PCI_Clock_66MHz = 0x01, PCI_Clock_33MHz = 0x00, @@ -492,6 +538,7 @@ enum rtl_register_content { MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ Beacon_en = (1 << 0), /* 8168 only. 
Reserved in the 8168b */ /* Config4 register */ @@ -504,6 +551,7 @@ enum rtl_register_content { Spi_en = (1 << 3), LanWake = (1 << 1), /* LanWake enable/disable */ PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ /* TBICSR p.28 */ TBIReset = 0x80000000, @@ -670,19 +718,33 @@ struct rtl8169_counters { __le16 tx_underun; }; +enum rtl_flag { + RTL_FLAG_TASK_ENABLED, + RTL_FLAG_TASK_SLOW_PENDING, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_TASK_PHY_PENDING, + RTL_FLAG_MAX +}; + +struct rtl8169_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + struct rtl8169_private { void __iomem *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; struct net_device *dev; struct napi_struct napi; - spinlock_t lock; u32 msg_enable; u16 txd_version; u16 mac_version; u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ - u32 dirty_rx; u32 dirty_tx; + struct rtl8169_stats rx_stats; + struct rtl8169_stats tx_stats; struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ dma_addr_t TxPhyAddr; @@ -691,13 +753,12 @@ struct rtl8169_private { struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ struct timer_list timer; u16 cp_cmd; - u16 intr_event; - u16 napi_event; - u16 intr_mask; + + u16 event_slow; struct mdio_ops { - void (*write)(void __iomem *, int, int); - int (*read)(void __iomem *, int); + void (*write)(struct rtl8169_private *, int, int); + int (*read)(struct rtl8169_private *, int); } mdio_ops; struct pll_power_ops { @@ -710,6 +771,11 @@ struct rtl8169_private { void (*disable)(struct rtl8169_private *); } jumbo_ops; + struct csi_ops { + void (*write)(struct rtl8169_private *, int, int); + u32 (*read)(struct rtl8169_private *, int); + } csi_ops; + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); int (*get_settings)(struct net_device *, struct ethtool_cmd *); void (*phy_reset_enable)(struct rtl8169_private *tp); @@ -717,7 +783,13 @@ struct rtl8169_private { unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); unsigned int (*link_ok)(void __iomem *); int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); - struct delayed_work task; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct mutex mutex; + struct work_struct work; + } wk; + unsigned features; struct mii_if_info mii; @@ -738,6 +810,8 @@ struct rtl8169_private { } phy_action; } *rtl_fw; #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) + + u32 ocp_base; }; MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); @@ -756,78 +830,138 @@ MODULE_FIRMWARE(FIRMWARE_8168E_3); MODULE_FIRMWARE(FIRMWARE_8105E_1); MODULE_FIRMWARE(FIRMWARE_8168F_1); MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); -static int rtl8169_open(struct net_device *dev); -static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance); -static int rtl8169_init_ring(struct net_device *dev); -static void rtl_hw_start(struct net_device *dev); -static int rtl8169_close(struct net_device *dev); -static void rtl_set_rx_mode(struct 
net_device *dev); -static void rtl8169_tx_timeout(struct net_device *dev); -static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); -static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, - void __iomem *, u32 budget); -static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); -static void rtl8169_down(struct net_device *dev); -static void rtl8169_rx_clear(struct rtl8169_private *tp); -static int rtl8169_poll(struct napi_struct *napi, int budget); +static void rtl_lock_work(struct rtl8169_private *tp) +{ + mutex_lock(&tp->wk.mutex); +} + +static void rtl_unlock_work(struct rtl8169_private *tp) +{ + mutex_unlock(&tp->wk.mutex); +} static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) { - int cap = pci_pcie_cap(pdev); + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, force); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static void rtl_udelay(unsigned int d) +{ + udelay(d); +} - if (cap) { - u16 ctl; +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + void (*delay)(unsigned int), unsigned int d, int n, + bool high) +{ + int i; - pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); - ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force; - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); + for (i = 0; i < n; i++) { + delay(d); + if (c->check(tp) == high) + return true; } + netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n", + c->msg, !high, n, d); + return false; +} + +static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, true); +} + +static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, false); +} + +static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, true); +} + +static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(OCPAR) & OCPAR_FLAG; } static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) { void __iomem *ioaddr = tp->mmio_addr; - int i; RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); - for (i = 0; i < 20; i++) { - udelay(100); - if (RTL_R32(OCPAR) & OCPAR_FLAG) - break; - } - return RTL_R32(OCPDR); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
+ RTL_R32(OCPDR) : ~0; } static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) { void __iomem *ioaddr = tp->mmio_addr; - int i; RTL_W32(OCPDR, data); RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); - for (i = 0; i < 20; i++) { - udelay(100); - if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0) - break; - } + + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(ERIAR) & ERIAR_FLAG; } static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) { void __iomem *ioaddr = tp->mmio_addr; - int i; RTL_W8(ERIDR, cmd); RTL_W32(ERIAR, 0x800010e8); msleep(2); - for (i = 0; i < 5; i++) { - udelay(100); - if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) - break; - } + + if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5)) + return; ocp_write(tp, 0x1, 0x30, 0x00000001); } @@ -841,36 +975,27 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; } -static void rtl8168_driver_start(struct rtl8169_private *tp) +DECLARE_RTL_COND(rtl_ocp_read_cond) { u16 reg; - int i; - - rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); reg = rtl8168_get_ocp_reg(tp); - for (i = 0; i < 10; i++) { - msleep(10); - if (ocp_read(tp, 0x0f, reg) & 0x00000800) - break; - } + return ocp_read(tp, 0x0f, reg) & 0x00000800; } -static void rtl8168_driver_stop(struct rtl8169_private *tp) +static void rtl8168_driver_start(struct rtl8169_private *tp) { - u16 reg; - int i; + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); - rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10); +} - reg = rtl8168_get_ocp_reg(tp); +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); - for (i = 0; i < 10; i++) { - msleep(10); - if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0) - break; - } + rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10); } static int r8168dp_check_dash(struct rtl8169_private *tp) @@ -880,21 +1005,121 @@ static int r8168dp_check_dash(struct rtl8169_private *tp) return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; } -static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value) +static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) { - int i; + if (reg & 0xffff0001) { + netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg); + return true; + } + return false; +} - RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff)); +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; - for (i = 20; i > 0; i--) { - /* - * Check if the RTL8169 has completed writing to the specified - * MII register. - */ - if (!(RTL_R32(PHYAR) & 0x80000000)) - break; - udelay(25); + return RTL_R32(GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(GPHY_OCP, reg << 15); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? 
+ (RTL_R32(GPHY_OCP) & 0xffff) : ~0; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(OCPDR, reg << 15); + + return RTL_R32(OCPDR); +} + +#define OCP_STD_PHY_BASE 0xa400 + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); /* * According to hardware specs a 20us delay is required after write * complete indication, but before sending next command. @@ -902,23 +1127,16 @@ static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value) udelay(20); } -static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr) +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) { - int i, value = -1; + void __iomem *ioaddr = tp->mmio_addr; + int value; - RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16); + RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(PHYAR) & 0xffff : ~0; - for (i = 20; i > 0; i--) { - /* - * Check if the RTL8169 has completed retrieving data from - * the specified MII register. - */ - if (RTL_R32(PHYAR) & 0x80000000) { - value = RTL_R32(PHYAR) & 0xffff; - break; - } - udelay(25); - } /* * According to hardware specs a 20us delay is required after read * complete indication, but before sending next command. 
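
The hunks above replace the driver's many open-coded udelay()/msleep() polling loops with a single rtl_loop_wait() helper driven by DECLARE_RTL_COND condition objects. The standalone sketch below is not part of the patch; it only illustrates the same macro-plus-callback shape in plain userspace C, and every name in it (fake_dev, poll_cond, reg_ready, poll_wait) is hypothetical.

/* Illustrative sketch of the bounded-poll pattern used above.
 * fake_dev stands in for the hardware; reg_ready mimics a
 * DECLARE_RTL_COND condition; poll_wait mimics rtl_loop_wait. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_dev {
	unsigned int reg;	/* stands in for a status register */
};

struct poll_cond {
	bool (*check)(struct fake_dev *);
	const char *msg;
};

#define DECLARE_POLL_COND(name)				\
static bool name ## _check(struct fake_dev *);		\
							\
static const struct poll_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct fake_dev *d)

/* The condition body reads like an ordinary function. */
DECLARE_POLL_COND(reg_ready)
{
	return d->reg & 0x1;
}

/* Poll 'c' every 'd' microseconds, at most 'n' times,
 * until its value matches 'high'; warn on timeout. */
static bool poll_wait(struct fake_dev *dev, const struct poll_cond *c,
		      unsigned int d, int n, bool high)
{
	int i;

	for (i = 0; i < n; i++) {
		usleep(d);
		if (c->check(dev) == high)
			return true;
	}
	fprintf(stderr, "%s == %d (loop: %d, delay: %d)\n",
		c->msg, !high, n, d);
	return false;
}

int main(void)
{
	struct fake_dev dev = { .reg = 1 };

	/* Same call shape as rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20). */
	return poll_wait(&dev, &reg_ready, 100, 20, true) ? 0 : 1;
}

The design choice mirrored here is that the condition carries its own name string, so every timed-out wait logs which register check failed without each call site duplicating the error message.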
@@ -928,45 +1146,35 @@ static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr) return value; } -static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data) +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) { - int i; + void __iomem *ioaddr = tp->mmio_addr; - RTL_W32(OCPDR, data | - ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); RTL_W32(EPHY_RXER_NUM, 0); - for (i = 0; i < 100; i++) { - mdelay(1); - if (!(RTL_R32(OCPAR) & OCPAR_FLAG)) - break; - } + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); } -static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value) +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) { - r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD | - (value & OCPDR_DATA_MASK)); + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); } -static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr) +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) { - int i; + void __iomem *ioaddr = tp->mmio_addr; - r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD); + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); mdelay(1); RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); RTL_W32(EPHY_RXER_NUM, 0); - for (i = 0; i < 100; i++) { - mdelay(1); - if (RTL_R32(OCPAR) & OCPAR_FLAG) - break; - } - - return RTL_R32(OCPDR) & OCPDR_DATA_MASK; + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? + RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0; } #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 @@ -981,22 +1189,25 @@ static void r8168dp_2_mdio_stop(void __iomem *ioaddr) RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); } -static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value) +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) { + void __iomem *ioaddr = tp->mmio_addr; + r8168dp_2_mdio_start(ioaddr); - r8169_mdio_write(ioaddr, reg_addr, value); + r8169_mdio_write(tp, reg, value); r8168dp_2_mdio_stop(ioaddr); } -static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr) +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) { + void __iomem *ioaddr = tp->mmio_addr; int value; r8168dp_2_mdio_start(ioaddr); - value = r8169_mdio_read(ioaddr, reg_addr); + value = r8169_mdio_read(tp, reg); r8168dp_2_mdio_stop(ioaddr); @@ -1005,12 +1216,12 @@ static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr) static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) { - tp->mdio_ops.write(tp->mmio_addr, location, val); + tp->mdio_ops.write(tp, location, val); } static int rtl_readphy(struct rtl8169_private *tp, int location) { - return tp->mdio_ops.read(tp->mmio_addr, location); + return tp->mdio_ops.read(tp, location); } static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) @@ -1041,113 +1252,64 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) return rtl_readphy(tp, location); } -static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value) +DECLARE_RTL_COND(rtl_ephyar_cond) { - unsigned int i; - - RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | - (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + void __iomem *ioaddr = tp->mmio_addr; - for (i = 0; i < 100; i++) { - if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG)) - break; - udelay(10); - } + return 
RTL_R32(EPHYAR) & EPHYAR_FLAG; } -static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr) +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) { - u16 value = 0xffff; - unsigned int i; - - RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); - - for (i = 0; i < 100; i++) { - if (RTL_R32(EPHYAR) & EPHYAR_FLAG) { - value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK; - break; - } - udelay(10); - } - - return value; -} + void __iomem *ioaddr = tp->mmio_addr; -static void rtl_csi_write(void __iomem *ioaddr, int addr, int value) -{ - unsigned int i; + RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); - RTL_W32(CSIDR, value); - RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); - for (i = 0; i < 100; i++) { - if (!(RTL_R32(CSIAR) & CSIAR_FLAG)) - break; - udelay(10); - } + udelay(10); } -static u32 rtl_csi_read(void __iomem *ioaddr, int addr) +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) { - u32 value = ~0x00; - unsigned int i; - - RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + void __iomem *ioaddr = tp->mmio_addr; - for (i = 0; i < 100; i++) { - if (RTL_R32(CSIAR) & CSIAR_FLAG) { - value = RTL_R32(CSIDR); - break; - } - udelay(10); - } + RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); - return value; + return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0; } -static -void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) { - unsigned int i; + void __iomem *ioaddr = tp->mmio_addr; BUG_ON((addr & 3) || (mask == 0)); RTL_W32(ERIDR, val); RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); - for (i = 0; i < 100; i++) { - if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) - break; - udelay(100); - } + rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); } -static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type) +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) { - u32 value = ~0x00; - unsigned int i; + void __iomem *ioaddr = tp->mmio_addr; RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); - for (i = 0; i < 100; i++) { - if (RTL_R32(ERIAR) & ERIAR_FLAG) { - value = RTL_R32(ERIDR); - break; - } - udelay(100); - } - - return value; + return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? 
+ RTL_R32(ERIDR) : ~0; } -static void -rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type) +static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, + u32 m, int type) { u32 val; - val = rtl_eri_read(ioaddr, addr, type); - rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); + val = rtl_eri_read(tp, addr, type); + rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); } struct exgmac_reg { @@ -1156,38 +1318,78 @@ struct exgmac_reg { u32 val; }; -static void rtl_write_exgmac_batch(void __iomem *ioaddr, +static void rtl_write_exgmac_batch(struct rtl8169_private *tp, const struct exgmac_reg *r, int len) { while (len-- > 0) { - rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC); + rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC); r++; } } -static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) +DECLARE_RTL_COND(rtl_efusear_cond) { - u8 value = 0xff; - unsigned int i; + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(EFUSEAR) & EFUSEAR_FLAG; +} + +static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + void __iomem *ioaddr = tp->mmio_addr; RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); - for (i = 0; i < 300; i++) { - if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) { - value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK; - break; - } - udelay(100); - } + return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} - return value; +static u16 rtl_get_events(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R16(IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrStatus, bits); + mmiowb(); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, 0); + mmiowb(); +} + +static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, bits); +} + +#define RTL_EVENT_NAPI_RX (RxOK | RxErr) +#define RTL_EVENT_NAPI_TX (TxOK | TxErr) +#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX) + +static void rtl_irq_enable_all(struct rtl8169_private *tp) +{ + rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow); } -static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { - RTL_W16(IntrMask, 0x0000); + void __iomem *ioaddr = tp->mmio_addr; - RTL_W16(IntrStatus, 0xffff); + rtl_irq_disable(tp); + rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); + RTL_R8(ChipCmd); } static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) @@ -1235,40 +1437,51 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) if (!netif_running(dev)) return; - if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { if (RTL_R8(PHYstatus) & _1000bpsF) { - rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, - 0x00000011, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, - 0x00000005, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); } else if (RTL_R8(PHYstatus) & _100bps) { - rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, - 0x0000001f, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, - 0x00000005, 
ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); } else { - rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, - 0x0000001f, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, - 0x0000003f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); } /* Reset packet filter */ - rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { if (RTL_R8(PHYstatus) & _1000bpsF) { - rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, - 0x00000011, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, - 0x00000005, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); } else { - rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, - 0x0000001f, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, - 0x0000003f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (RTL_R8(PHYstatus) & _10bps) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, + ERIAR_EXGMAC); } } } @@ -1277,9 +1490,6 @@ static void __rtl8169_check_link_status(struct net_device *dev, struct rtl8169_private *tp, void __iomem *ioaddr, bool pm) { - unsigned long flags; - - spin_lock_irqsave(&tp->lock, flags); if (tp->link_ok(ioaddr)) { rtl_link_chg_patch(tp); /* This is to cancel a scheduled suspend if there's one. 
*/ @@ -1292,9 +1502,8 @@ static void __rtl8169_check_link_status(struct net_device *dev, netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); if (pm) - pm_schedule_suspend(&tp->pci_dev->dev, 100); + pm_schedule_suspend(&tp->pci_dev->dev, 5000); } - spin_unlock_irqrestore(&tp->lock, flags); } static void rtl8169_check_link_status(struct net_device *dev, @@ -1337,12 +1546,12 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); wol->supported = WAKE_ANY; wol->wolopts = __rtl8169_get_wol(tp); - spin_unlock_irq(&tp->lock); + rtl_unlock_work(tp); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1354,7 +1563,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) u16 reg; u8 mask; } cfg[] = { - { WAKE_ANY, Config1, PMEnable }, { WAKE_PHY, Config3, LinkUp }, { WAKE_MAGIC, Config3, MagicPacket }, { WAKE_UCAST, Config5, UWF }, @@ -1362,16 +1570,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { WAKE_MCAST, Config5, MWF }, { WAKE_ANY, Config5, LanWake } }; + u8 options; RTL_W8(Cfg9346, Cfg9346_Unlock); for (i = 0; i < ARRAY_SIZE(cfg); i++) { - u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(cfg[i].reg, options); } + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17: + options = RTL_R8(Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(Config1, options); + break; + default: + options = RTL_R8(Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(Config2, options); + break; + } + RTL_W8(Cfg9346, Cfg9346_Lock); } @@ -1379,14 +1603,15 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); if (wol->wolopts) tp->features |= RTL_FEATURE_WOL; else tp->features &= ~RTL_FEATURE_WOL; __rtl8169_set_wol(tp, wol->wolopts); - spin_unlock_irq(&tp->lock); + + rtl_unlock_work(tp); device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); @@ -1404,12 +1629,13 @@ static void rtl8169_get_drvinfo(struct net_device *dev, struct rtl8169_private *tp = netdev_priv(dev); struct rtl_fw *rtl_fw = tp->rtl_fw; - strcpy(info->driver, MODULENAME); - strcpy(info->version, RTL8169_VERSION); - strcpy(info->bus_info, pci_name(tp->pci_dev)); + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->version, RTL8169_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); - strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? 
"N/A" : - rtl_fw->version); + if (!IS_ERR_OR_NULL(rtl_fw)) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); } static int rtl8169_get_regs_len(struct net_device *dev) @@ -1540,20 +1766,20 @@ out: static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8169_private *tp = netdev_priv(dev); - unsigned long flags; int ret; del_timer_sync(&tp->timer); - spin_lock_irqsave(&tp->lock, flags); + rtl_lock_work(tp); ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), cmd->duplex, cmd->advertising); - spin_unlock_irqrestore(&tp->lock, flags); + rtl_unlock_work(tp); return ret; } -static u32 rtl8169_fix_features(struct net_device *dev, u32 features) +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); @@ -1567,34 +1793,53 @@ static u32 rtl8169_fix_features(struct net_device *dev, u32 features) return features; } -static int rtl8169_set_features(struct net_device *dev, u32 features) +static void __rtl8169_set_features(struct net_device *dev, + netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); + netdev_features_t changed = features ^ dev->features; void __iomem *ioaddr = tp->mmio_addr; - unsigned long flags; - spin_lock_irqsave(&tp->lock, flags); + if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_RX))) + return; - if (features & NETIF_F_RXCSUM) - tp->cp_cmd |= RxChkSum; - else - tp->cp_cmd &= ~RxChkSum; + if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) { + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; - if (dev->features & NETIF_F_HW_VLAN_RX) - tp->cp_cmd |= RxVlan; - else - tp->cp_cmd &= ~RxVlan; + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; - RTL_W16(CPlusCmd, tp->cp_cmd); - RTL_R16(CPlusCmd); + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); + } + if (changed & NETIF_F_RXALL) { + int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt)); + if (features & NETIF_F_RXALL) + tmp |= (AcceptErr | AcceptRunt); + RTL_W32(RxConfig, tmp); + } +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); - spin_unlock_irqrestore(&tp->lock, flags); + rtl_lock_work(tp); + __rtl8169_set_features(dev, features); + rtl_unlock_work(tp); return 0; } -static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, - struct sk_buff *skb) + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) { return (vlan_tx_tag_present(skb)) ? 
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; @@ -1605,9 +1850,7 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) u32 opts2 = le32_to_cpu(desc->opts2); if (opts2 & RxVlanTag) - __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); - - desc->opts2 = 0; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) @@ -1641,14 +1884,12 @@ static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8169_private *tp = netdev_priv(dev); - unsigned long flags; int rc; - spin_lock_irqsave(&tp->lock, flags); - + rtl_lock_work(tp); rc = tp->get_settings(dev, cmd); + rtl_unlock_work(tp); - spin_unlock_irqrestore(&tp->lock, flags); return rc; } @@ -1656,14 +1897,14 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct rtl8169_private *tp = netdev_priv(dev); - unsigned long flags; - - if (regs->len > R8169_REGS_SIZE) - regs->len = R8169_REGS_SIZE; + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; - spin_lock_irqsave(&tp->lock, flags); - memcpy_fromio(p, tp->mmio_addr, regs->len); - spin_unlock_irqrestore(&tp->lock, flags); + rtl_lock_work(tp); + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); + rtl_unlock_work(tp); } static u32 rtl8169_get_msglevel(struct net_device *dev) @@ -1706,6 +1947,13 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset) } } +DECLARE_RTL_COND(rtl_counters_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(CounterAddrLow) & CounterDump; +} + static void rtl8169_update_counters(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -1714,7 +1962,6 @@ static void rtl8169_update_counters(struct net_device *dev) struct rtl8169_counters *counters; dma_addr_t paddr; u32 cmd; - int wait = 1000; /* * Some chips are unable to dump tally counters when the receiver @@ -1732,13 +1979,8 @@ static void rtl8169_update_counters(struct net_device *dev) RTL_W32(CounterAddrLow, cmd); RTL_W32(CounterAddrLow, cmd | CounterDump); - while (wait--) { - if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) { - memcpy(&tp->counters, counters, sizeof(*counters)); - break; - } - udelay(10); - } + if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000)) + memcpy(&tp->counters, counters, sizeof(*counters)); RTL_W32(CounterAddrLow, 0); RTL_W32(CounterAddrHigh, 0); @@ -1793,6 +2035,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_strings = rtl8169_get_strings, .get_sset_count = rtl8169_get_sset_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, @@ -1815,7 +2058,14 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, u32 val; int mac_version; } mac_info[] = { + /* 8168G family. */ + { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, + { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, + { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, + { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, + /* 8168F family. */ + { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, @@ -1853,6 +2103,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, /* 8101 family. 
*/ + { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 }, + { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 }, + { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, @@ -1896,6 +2149,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, netif_notice(tp, probe, dev, "unknown MAC, using family default\n"); tp->mac_version = default_version; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { + tp->mac_version = tp->mii.supports_gmii ? + RTL_GIGA_MAC_VER_42 : + RTL_GIGA_MAC_VER_43; } } @@ -1922,9 +2179,7 @@ static void rtl_writephy_batch(struct rtl8169_private *tp, #define PHY_DATA_OR 0x10000000 #define PHY_DATA_AND 0x20000000 #define PHY_BJMPN 0x30000000 -#define PHY_READ_EFUSE 0x40000000 -#define PHY_READ_MAC_BYTE 0x50000000 -#define PHY_WRITE_MAC_BYTE 0x60000000 +#define PHY_MDIO_CHG 0x40000000 #define PHY_CLEAR_READCOUNT 0x70000000 #define PHY_WRITE 0x80000000 #define PHY_READCOUNT_EQ_SKIP 0x90000000 @@ -1933,7 +2188,6 @@ static void rtl_writephy_batch(struct rtl8169_private *tp, #define PHY_WRITE_PREVIOUS 0xc0000000 #define PHY_SKIPN 0xd0000000 #define PHY_DELAY_MS 0xe0000000 -#define PHY_WRITE_ERI_WORD 0xf0000000 struct fw_info { u32 magic; @@ -2010,7 +2264,7 @@ static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, case PHY_READ: case PHY_DATA_OR: case PHY_DATA_AND: - case PHY_READ_EFUSE: + case PHY_MDIO_CHG: case PHY_CLEAR_READCOUNT: case PHY_WRITE: case PHY_WRITE_PREVIOUS: @@ -2041,9 +2295,6 @@ static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, } break; - case PHY_READ_MAC_BYTE: - case PHY_WRITE_MAC_BYTE: - case PHY_WRITE_ERI_WORD: default: netif_err(tp, ifup, tp->dev, "Invalid action 0x%08x\n", action); @@ -2074,10 +2325,13 @@ out: static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) { struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + struct mdio_ops org, *ops = &tp->mdio_ops; u32 predata, count; size_t index; predata = count = 0; + org.write = ops->write; + org.read = ops->read; for (index = 0; index < pa->size; ) { u32 action = le32_to_cpu(pa->code[index]); @@ -2104,8 +2358,15 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) case PHY_BJMPN: index -= regno; break; - case PHY_READ_EFUSE: - predata = rtl8168d_efuse_read(tp->mmio_addr, regno); + case PHY_MDIO_CHG: + if (data == 0) { + ops->write = org.write; + ops->read = org.read; + } else if (data == 1) { + ops->write = mac_mcu_write; + ops->read = mac_mcu_read; + } + index++; break; case PHY_CLEAR_READCOUNT: @@ -2141,13 +2402,13 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) index++; break; - case PHY_READ_MAC_BYTE: - case PHY_WRITE_MAC_BYTE: - case PHY_WRITE_ERI_WORD: default: BUG(); } } + + ops->write = org.write; + ops->read = org.read; } static void rtl_release_firmware(struct rtl8169_private *tp) @@ -2545,7 +2806,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) { 0x1f, 0x0000 }, { 0x0d, 0xf880 } }; - void __iomem *ioaddr = tp->mmio_addr; rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); @@ -2557,7 +2817,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef); rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00); - if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { static const struct phy_reg 
phy_reg_init[] = { { 0x1f, 0x0002 }, { 0x05, 0x669a }, @@ -2657,11 +2917,10 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) { 0x1f, 0x0000 }, { 0x0d, 0xf880 } }; - void __iomem *ioaddr = tp->mmio_addr; rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); - if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { static const struct phy_reg phy_reg_init[] = { { 0x1f, 0x0002 }, { 0x05, 0x669a }, @@ -2868,6 +3127,23 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x0d, 0x0000); } +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) +{ + const u16 w[] = { + addr[0] | (addr[1] << 8), + addr[2] | (addr[3] << 8), + addr[4] | (addr[5] << 8) + }; + const struct exgmac_reg e[] = { + { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) }, + { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] }, + { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 }, + { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) } + }; + + rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e)); +} + static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -2929,8 +3205,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0000); /* EEE setting */ - rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, - ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC); rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b85); rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); @@ -2951,6 +3226,31 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); rtl_writephy(tp, 0x1f, 0x0000); + + /* Broken BIOS workaround: feed GigaMAC registers with MAC address. 
*/ + rtl_rar_exgmac_set(tp, tp->dev->dev_addr); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) +{ + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); } static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) @@ -2994,24 +3294,7 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - /* For 4-corner performance improve */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b80); - rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - - /* PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); - - /* Improve 10M EEE waveform */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + rtl8168f_hw_phy_config(tp); /* Improve 2-pair detection performance */ rtl_writephy(tp, 0x1f, 0x0005); @@ -3024,26 +3307,177 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) { rtl_apply_firmware(tp); - /* For 4-corner performance improve */ + rtl8168f_hw_phy_config(tp); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00aa }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b80); - rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - /* PHY auto speed down */ + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* Modify green table for giga */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b54); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 0x8b5d); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 0x8a7c); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a7f); + rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000); + rtl_writephy(tp, 0x05, 0x8a82); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 
0x8a88); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + + /* uc same-seed solution */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* eee setting */ + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x10) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000); + } else { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000); + } + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x13) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000); + } else { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002); + } + + /* Enable PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000); + + /* EEE auto-fallback function */ + rtl_writephy(tp, 0x1f, 0x0a4b); + rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); - /* Improve 10M EEE waveform */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000); + + /* Improve SWR Efficiency */ + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x5065); + rtl_writephy(tp, 0x14, 0xd065); + rtl_writephy(tp, 0x1f, 0x0bc8); + rtl_writephy(tp, 0x11, 0x5655); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x14, 0x9065); + rtl_writephy(tp, 0x14, 0x1065); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); + rtl_writephy(tp, 0x1f, 0x0000); } +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); +} + static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -3087,6 +3521,45 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); } +static void rtl8402_hw_phy_config(struct rtl8169_private *tp) +{ + /* Disable ALDPS before setting firmware 
*/ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(20); + + rtl_apply_firmware(tp); + + /* EEE setting */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x10, 0x401f); + rtl_writephy(tp, 0x19, 0x7030); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); +} + static void rtl_hw_phy_config(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -3175,23 +3648,41 @@ static void rtl_hw_phy_config(struct net_device *dev) rtl8168f_2_hw_phy_config(tp); break; + case RTL_GIGA_MAC_VER_37: + rtl8402_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl8411_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_39: + rtl8106e_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_40: + rtl8168g_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + rtl8168g_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_41: default: break; } } -static void rtl8169_phy_timer(unsigned long __opaque) +static void rtl_phy_work(struct rtl8169_private *tp) { - struct net_device *dev = (struct net_device *)__opaque; - struct rtl8169_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; void __iomem *ioaddr = tp->mmio_addr; unsigned long timeout = RTL8169_PHY_TIMEOUT; assert(tp->mac_version > RTL_GIGA_MAC_VER_01); - spin_lock_irq(&tp->lock); - if (tp->phy_reset_pending(tp)) { /* * A busy loop could burn quite a few cycles on nowadays CPU. @@ -3202,34 +3693,29 @@ static void rtl8169_phy_timer(unsigned long __opaque) } if (tp->link_ok(ioaddr)) - goto out_unlock; + return; - netif_warn(tp, link, dev, "PHY reset until link up\n"); + netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); tp->phy_reset_enable(tp); out_mod_timer: mod_timer(timer, jiffies + timeout); -out_unlock: - spin_unlock_irq(&tp->lock); } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. 
- */ -static void rtl8169_netpoll(struct net_device *dev) +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { + if (!test_and_set_bit(flag, tp->wk.flags)) + schedule_work(&tp->wk.work); +} + +static void rtl8169_phy_timer(unsigned long __opaque) +{ + struct net_device *dev = (struct net_device *)__opaque; struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - disable_irq(pdev->irq); - rtl8169_interrupt(pdev->irq, dev); - enable_irq(pdev->irq); + rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); } -#endif static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, void __iomem *ioaddr) @@ -3241,18 +3727,16 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, free_netdev(dev); } +DECLARE_RTL_COND(rtl_phy_reset_cond) +{ + return tp->phy_reset_pending(tp); +} + static void rtl8169_phy_reset(struct net_device *dev, struct rtl8169_private *tp) { - unsigned int i; - tp->phy_reset_enable(tp); - for (i = 0; i < 100; i++) { - if (!tp->phy_reset_pending(tp)) - return; - msleep(1); - } - netif_err(tp, link, dev, "PHY reset failed\n"); + rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); } static bool rtl_tbi_enabled(struct rtl8169_private *tp) @@ -3302,37 +3786,23 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) { void __iomem *ioaddr = tp->mmio_addr; - u32 high; - u32 low; - - low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24); - high = addr[4] | (addr[5] << 8); - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); RTL_W8(Cfg9346, Cfg9346_Unlock); - RTL_W32(MAC4, high); + RTL_W32(MAC4, addr[4] | addr[5] << 8); RTL_R32(MAC4); - RTL_W32(MAC0, low); + RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); RTL_R32(MAC0); - if (tp->mac_version == RTL_GIGA_MAC_VER_34) { - const struct exgmac_reg e[] = { - { .addr = 0xe0, ERIAR_MASK_1111, .val = low }, - { .addr = 0xe4, ERIAR_MASK_1111, .val = high }, - { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 }, - { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 | - low >> 16 }, - }; - - rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e)); - } + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); RTL_W8(Cfg9346, Cfg9346_Lock); - spin_unlock_irq(&tp->lock); + rtl_unlock_work(tp); } static int rtl_set_mac_address(struct net_device *dev, void *p) @@ -3382,67 +3852,6 @@ static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data return -EOPNOTSUPP; } -static const struct rtl_cfg_info { - void (*hw_start)(struct net_device *); - unsigned int region; - unsigned int align; - u16 intr_event; - u16 napi_event; - unsigned features; - u8 default_ver; -} rtl_cfg_infos [] = { - [RTL_CFG_0] = { - .hw_start = rtl_hw_start_8169, - .region = 1, - .align = 0, - .intr_event = SYSErr | LinkChg | RxOverflow | - RxFIFOOver | TxErr | TxOK | RxOK | RxErr, - .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, - .features = RTL_FEATURE_GMII, - .default_ver = RTL_GIGA_MAC_VER_01, - }, - [RTL_CFG_1] = { - .hw_start = rtl_hw_start_8168, - .region = 2, - .align = 8, - .intr_event = SYSErr | LinkChg | RxOverflow | - TxErr | TxOK | RxOK | RxErr, - .napi_event = TxErr | TxOK | RxOK | RxOverflow, - .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, - .default_ver = RTL_GIGA_MAC_VER_11, - }, - [RTL_CFG_2] = { - .hw_start = rtl_hw_start_8101, - .region = 2, - .align = 8, - .intr_event = SYSErr | LinkChg | 
RxOverflow | PCSTimeout | - RxFIFOOver | TxErr | TxOK | RxOK | RxErr, - .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, - .features = RTL_FEATURE_MSI, - .default_ver = RTL_GIGA_MAC_VER_13, - } -}; - -/* Cfg9346_Unlock assumed. */ -static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, - const struct rtl_cfg_info *cfg) -{ - unsigned msi = 0; - u8 cfg2; - - cfg2 = RTL_R8(Config2) & ~MSIEnable; - if (cfg->features & RTL_FEATURE_MSI) { - if (pci_enable_msi(pdev)) { - dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); - } else { - cfg2 |= MSIEnable; - msi = RTL_FEATURE_MSI; - } - } - RTL_W8(Config2, cfg2); - return msi; -} - static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) { if (tp->features & RTL_FEATURE_MSI) { @@ -3451,26 +3860,7 @@ static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) } } -static const struct net_device_ops rtl8169_netdev_ops = { - .ndo_open = rtl8169_open, - .ndo_stop = rtl8169_close, - .ndo_get_stats = rtl8169_get_stats, - .ndo_start_xmit = rtl8169_start_xmit, - .ndo_tx_timeout = rtl8169_tx_timeout, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = rtl8169_change_mtu, - .ndo_fix_features = rtl8169_fix_features, - .ndo_set_features = rtl8169_set_features, - .ndo_set_mac_address = rtl_set_mac_address, - .ndo_do_ioctl = rtl8169_ioctl, - .ndo_set_rx_mode = rtl_set_rx_mode, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = rtl8169_netpoll, -#endif - -}; - -static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) +static void rtl_init_mdio_ops(struct rtl8169_private *tp) { struct mdio_ops *ops = &tp->mdio_ops; @@ -3484,6 +3874,14 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) ops->write = r8168dp_2_mdio_write; ops->read = r8168dp_2_mdio_read; break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + ops->write = r8168g_mdio_write; + ops->read = r8168g_mdio_read; + break; default: ops->write = r8169_mdio_write; ops->read = r8169_mdio_read; @@ -3491,16 +3889,50 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) } } +static void rtl_speed_down(struct rtl8169_private *tp) +{ + u32 adv; + int lpa; + + rtl_writephy(tp, 0x1f, 0x0000); + lpa = rtl_readphy(tp, MII_LPA); + + if (lpa & (LPA_10HALF | LPA_10FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + else if (lpa & (LPA_100HALF | LPA_100FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + else + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? 
+ ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0); + + rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + adv); +} + static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_29: case RTL_GIGA_MAC_VER_30: case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -3514,9 +3946,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) return false; - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - + rtl_speed_down(tp); rtl_wol_suspend_quirk(tp); return true; @@ -3536,15 +3966,45 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) static void r810x_pll_power_down(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + if (rtl_wol_pll_power_down(tp)) return; r810x_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + } } static void r810x_pll_power_up(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + r810x_phy_power_up(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + } } static void r8168_phy_power_up(struct rtl8169_private *tp) @@ -3580,6 +4040,8 @@ static void r8168_phy_power_down(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); break; @@ -3624,7 +4086,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_32 || tp->mac_version == RTL_GIGA_MAC_VER_33) - rtl_ephy_write(ioaddr, 0x19, 0xff64); + rtl_ephy_write(tp, 0x19, 0xff64); if (rtl_wol_pll_power_down(tp)) return; @@ -3641,6 +4103,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_33: RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, + 0xfc000000, ERIAR_EXGMAC); + break; } } @@ -3648,13 +4115,6 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) && - r8168dp_check_dash(tp)) { - return; - } - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: @@ -3665,6 +4125,11 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_33: RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 
0xfc000000, + 0x00000000, ERIAR_EXGMAC); + break; } r8168_phy_power_up(tp); @@ -3687,7 +4152,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp) rtl_generic_op(tp, tp->pll_power_ops.up); } -static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) +static void rtl_init_pll_power_ops(struct rtl8169_private *tp) { struct pll_power_ops *ops = &tp->pll_power_ops; @@ -3699,6 +4164,9 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_16: case RTL_GIGA_MAC_VER_29: case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: ops->down = r810x_pll_power_down; ops->up = r810x_pll_power_up; break; @@ -3723,6 +4191,11 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_35: case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_44: ops->down = r8168_pll_power_down; ops->up = r8168_pll_power_up; break; @@ -3762,8 +4235,19 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_22: case RTL_GIGA_MAC_VER_23: case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; + case RTL_GIGA_MAC_VER_40: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; default: RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); break; @@ -3772,17 +4256,25 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) { - tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0; + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; } static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); rtl_generic_op(tp, tp->jumbo_ops.enable); + RTL_W8(Cfg9346, Cfg9346_Lock); } static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); rtl_generic_op(tp, tp->jumbo_ops.disable); + RTL_W8(Cfg9346, Cfg9346_Lock); } static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) @@ -3820,23 +4312,21 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; RTL_W8(MaxTxPacketSize, 0x3f); RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); RTL_W8(Config4, RTL_R8(Config4) | 0x01); - pci_write_config_byte(pdev, 0x79, 0x20); + rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); } static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; RTL_W8(MaxTxPacketSize, 0x0c); RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); RTL_W8(Config4, RTL_R8(Config4) & ~0x01); - pci_write_config_byte(pdev, 0x79, 0x50); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); } static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) @@ -3869,7 +4359,7 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); } -static 
void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp) +static void rtl_init_jumbo_ops(struct rtl8169_private *tp) { struct jumbo_ops *ops = &tp->jumbo_ops; @@ -3912,6 +4402,11 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp) * No action needed for jumbo frames with 8169. * No jumbo for 810x at all. */ + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: default: ops->disable = NULL; ops->enable = NULL; @@ -3919,296 +4414,20 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp) } } -static void rtl_hw_reset(struct rtl8169_private *tp) +DECLARE_RTL_COND(rtl_chipcmd_cond) { void __iomem *ioaddr = tp->mmio_addr; - int i; - - /* Soft reset the chip. */ - RTL_W8(ChipCmd, CmdReset); - - /* Check that the chip has finished the reset. */ - for (i = 0; i < 100; i++) { - if ((RTL_R8(ChipCmd) & CmdReset) == 0) - break; - udelay(100); - } - rtl8169_init_ring_indexes(tp); + return RTL_R8(ChipCmd) & CmdReset; } -static int __devinit -rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; - const unsigned int region = cfg->region; - struct rtl8169_private *tp; - struct mii_if_info *mii; - struct net_device *dev; - void __iomem *ioaddr; - int chipset, i; - int rc; - - if (netif_msg_drv(&debug)) { - printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", - MODULENAME, RTL8169_VERSION); - } - - dev = alloc_etherdev(sizeof (*tp)); - if (!dev) { - if (netif_msg_drv(&debug)) - dev_err(&pdev->dev, "unable to alloc new ethernet\n"); - rc = -ENOMEM; - goto out; - } - - SET_NETDEV_DEV(dev, &pdev->dev); - dev->netdev_ops = &rtl8169_netdev_ops; - tp = netdev_priv(dev); - tp->dev = dev; - tp->pci_dev = pdev; - tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); - - mii = &tp->mii; - mii->dev = dev; - mii->mdio_read = rtl_mdio_read; - mii->mdio_write = rtl_mdio_write; - mii->phy_id_mask = 0x1f; - mii->reg_num_mask = 0x1f; - mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); - - /* disable ASPM completely as that cause random device stop working - * problems as well as full system hangs for some PCIe devices users */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - - /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ - rc = pci_enable_device(pdev); - if (rc < 0) { - netif_err(tp, probe, dev, "enable failure\n"); - goto err_out_free_dev_1; - } - - if (pci_set_mwi(pdev) < 0) - netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); - - /* make sure PCI base addr 1 is MMIO */ - if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { - netif_err(tp, probe, dev, - "region #%d not an MMIO resource, aborting\n", - region); - rc = -ENODEV; - goto err_out_mwi_2; - } - - /* check for weird/broken PCI region reporting */ - if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { - netif_err(tp, probe, dev, - "Invalid PCI region size(s), aborting\n"); - rc = -ENODEV; - goto err_out_mwi_2; - } - - rc = pci_request_regions(pdev, MODULENAME); - if (rc < 0) { - netif_err(tp, probe, dev, "could not request regions\n"); - goto err_out_mwi_2; - } - - tp->cp_cmd = RxChkSum; - - if ((sizeof(dma_addr_t) > 4) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { - tp->cp_cmd |= PCIDAC; - dev->features |= NETIF_F_HIGHDMA; - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc < 0) { - netif_err(tp, probe, dev, "DMA configuration failed\n"); - goto err_out_free_res_3; - } - } - - /* ioremap MMIO region */ - ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); - if (!ioaddr) { - netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); - rc = -EIO; - goto err_out_free_res_3; - } - tp->mmio_addr = ioaddr; - - if (!pci_is_pcie(pdev)) - netif_info(tp, probe, dev, "not PCI Express\n"); - - /* Identify chip attached to board */ - rtl8169_get_mac_version(tp, dev, cfg->default_ver); - - rtl_init_rxcfg(tp); - - RTL_W16(IntrMask, 0x0000); - - rtl_hw_reset(tp); - - RTL_W16(IntrStatus, 0xffff); - - pci_set_master(pdev); - - /* - * Pretend we are using VLANs; This bypasses a nasty bug where - * Interrupts stop flowing on high load on 8110SCd controllers. 
- */ - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - tp->cp_cmd |= RxVlan; - - rtl_init_mdio_ops(tp); - rtl_init_pll_power_ops(tp); - rtl_init_jumbo_ops(tp); - - rtl8169_print_mac_version(tp); - - chipset = tp->mac_version; - tp->txd_version = rtl_chip_infos[chipset].txd_version; - - RTL_W8(Cfg9346, Cfg9346_Unlock); - RTL_W8(Config1, RTL_R8(Config1) | PMEnable); - RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); - if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) - tp->features |= RTL_FEATURE_WOL; - if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) - tp->features |= RTL_FEATURE_WOL; - tp->features |= rtl_try_msi(pdev, ioaddr, cfg); - RTL_W8(Cfg9346, Cfg9346_Lock); - - if (rtl_tbi_enabled(tp)) { - tp->set_speed = rtl8169_set_speed_tbi; - tp->get_settings = rtl8169_gset_tbi; - tp->phy_reset_enable = rtl8169_tbi_reset_enable; - tp->phy_reset_pending = rtl8169_tbi_reset_pending; - tp->link_ok = rtl8169_tbi_link_ok; - tp->do_ioctl = rtl_tbi_ioctl; - } else { - tp->set_speed = rtl8169_set_speed_xmii; - tp->get_settings = rtl8169_gset_xmii; - tp->phy_reset_enable = rtl8169_xmii_reset_enable; - tp->phy_reset_pending = rtl8169_xmii_reset_pending; - tp->link_ok = rtl8169_xmii_link_ok; - tp->do_ioctl = rtl_xmii_ioctl; - } - - spin_lock_init(&tp->lock); - - /* Get MAC address */ - for (i = 0; i < MAC_ADDR_LEN; i++) - dev->dev_addr[i] = RTL_R8(MAC0 + i); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - - SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); - dev->watchdog_timeo = RTL8169_TX_TIMEOUT; - dev->irq = pdev->irq; - dev->base_addr = (unsigned long) ioaddr; - - netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); - - /* don't enable SG, IP_CSUM and TSO by default - it might not work - * properly for all devices */ - dev->features |= NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_HIGHDMA; - - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - /* 8110SCd requires hardware Rx VLAN - disallow toggling */ - dev->hw_features &= ~NETIF_F_HW_VLAN_RX; - - tp->intr_mask = 0xffff; - tp->hw_start = cfg->hw_start; - tp->intr_event = cfg->intr_event; - tp->napi_event = cfg->napi_event; - - tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? - ~(RxBOVF | RxFOVF) : ~0; - - init_timer(&tp->timer); - tp->timer.data = (unsigned long) dev; - tp->timer.function = rtl8169_phy_timer; - - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; - - rc = register_netdev(dev); - if (rc < 0) - goto err_out_msi_4; - - pci_set_drvdata(pdev, dev); - - netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n", - rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr, - (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq); - if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { - netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " - "tx checksumming: %s]\n", - rtl_chip_infos[chipset].jumbo_max, - rtl_chip_infos[chipset].jumbo_tx_csum ? 
"ok" : "ko"); - } - - if (tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) { - rtl8168_driver_start(tp); - } - - device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); - - if (pci_dev_run_wake(pdev)) - pm_runtime_put_noidle(&pdev->dev); - - netif_carrier_off(dev); - -out: - return rc; - -err_out_msi_4: - rtl_disable_msi(pdev, tp); - iounmap(ioaddr); -err_out_free_res_3: - pci_release_regions(pdev); -err_out_mwi_2: - pci_clear_mwi(pdev); - pci_disable_device(pdev); -err_out_free_dev_1: - free_netdev(dev); - goto out; -} - -static void __devexit rtl8169_remove_one(struct pci_dev *pdev) +static void rtl_hw_reset(struct rtl8169_private *tp) { - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - if (tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) { - rtl8168_driver_stop(tp); - } - - cancel_delayed_work_sync(&tp->task); - - unregister_netdev(dev); - - rtl_release_firmware(tp); - - if (pci_dev_run_wake(pdev)) - pm_runtime_get_noresume(&pdev->dev); + void __iomem *ioaddr = tp->mmio_addr; - /* restore original MAC address */ - rtl_rar_set(tp, dev->perm_addr); + RTL_W8(ChipCmd, CmdReset); - rtl_disable_msi(pdev, tp); - rtl8169_release_board(pdev, dev, tp->mmio_addr); - pci_set_drvdata(pdev, NULL); + rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); } static void rtl_request_uncached_firmware(struct rtl8169_private *tp) @@ -4255,83 +4474,25 @@ static void rtl_request_firmware(struct rtl8169_private *tp) rtl_request_uncached_firmware(tp); } -static int rtl8169_open(struct net_device *dev) +static void rtl_rx_close(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - int retval = -ENOMEM; - - pm_runtime_get_sync(&pdev->dev); - - /* - * Rx and Tx desscriptors needs 256 bytes alignment. - * dma_alloc_coherent provides more. - */ - tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, - &tp->TxPhyAddr, GFP_KERNEL); - if (!tp->TxDescArray) - goto err_pm_runtime_put; - - tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, - &tp->RxPhyAddr, GFP_KERNEL); - if (!tp->RxDescArray) - goto err_free_tx_0; - retval = rtl8169_init_ring(dev); - if (retval < 0) - goto err_free_rx_1; - - INIT_DELAYED_WORK(&tp->task, NULL); - - smp_mb(); - - rtl_request_firmware(tp); - - retval = request_irq(dev->irq, rtl8169_interrupt, - (tp->features & RTL_FEATURE_MSI) ? 
0 : IRQF_SHARED, - dev->name, dev); - if (retval < 0) - goto err_release_fw_2; - - napi_enable(&tp->napi); - - rtl8169_init_phy(dev, tp); - - rtl8169_set_features(dev, dev->features); - - rtl_pll_power_up(tp); - - rtl_hw_start(dev); - - tp->saved_wolopts = 0; - pm_runtime_put_noidle(&pdev->dev); + RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} - rtl8169_check_link_status(dev, tp, ioaddr); -out: - return retval; +DECLARE_RTL_COND(rtl_npq_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; -err_release_fw_2: - rtl_release_firmware(tp); - rtl8169_rx_clear(tp); -err_free_rx_1: - dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, - tp->RxPhyAddr); - tp->RxDescArray = NULL; -err_free_tx_0: - dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, - tp->TxPhyAddr); - tp->TxDescArray = NULL; -err_pm_runtime_put: - pm_runtime_put_noidle(&pdev->dev); - goto out; + return RTL_R8(TxPoll) & NPQ; } -static void rtl_rx_close(struct rtl8169_private *tp) +DECLARE_RTL_COND(rtl_txcfg_empty_cond) { void __iomem *ioaddr = tp->mmio_addr; - RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); + return RTL_R32(TxConfig) & TXCFG_EMPTY; } static void rtl8169_hw_reset(struct rtl8169_private *tp) @@ -4339,21 +4500,26 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; /* Disable interrupts */ - rtl8169_irq_mask_and_ack(ioaddr); + rtl8169_irq_mask_and_ack(tp); rtl_rx_close(tp); if (tp->mac_version == RTL_GIGA_MAC_VER_27 || tp->mac_version == RTL_GIGA_MAC_VER_28 || tp->mac_version == RTL_GIGA_MAC_VER_31) { - while (RTL_R8(TxPoll) & NPQ) - udelay(20); + rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_36) { + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_40 || + tp->mac_version == RTL_GIGA_MAC_VER_41 || + tp->mac_version == RTL_GIGA_MAC_VER_42 || + tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); - while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) - udelay(100); + rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); } else { RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); udelay(100); @@ -4377,7 +4543,7 @@ static void rtl_hw_start(struct net_device *dev) tp->hw_start(dev); - netif_start_queue(dev); + rtl_irq_enable_all(tp); } static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, @@ -4434,6 +4600,59 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) } } +static void rtl_set_rx_mode(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp = 0; + + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. 
*/ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + u32 data = mc_filter[0]; + + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(data); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_35) + mc_filter[1] = mc_filter[0] = 0xffffffff; + + RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); + + RTL_W32(RxConfig, tmp); +} + static void rtl_hw_start_8169(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -4504,27 +4723,151 @@ static void rtl_hw_start_8169(struct net_device *dev) /* no early-rx interrupts */ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); +} - /* Enable all known interrupts by setting the interrupt mask. */ - RTL_W16(IntrMask, tp->intr_event); +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + if (tp->csi_ops.write) + tp->csi_ops.write(tp, addr, value); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0; } -static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) +static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits) { u32 csi; - csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff; - rtl_csi_write(ioaddr, 0x070c, csi | bits); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | bits); +} + +static void rtl_csi_access_enable_1(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x17000000); +} + +static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27000000); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(CSIAR) & CSIAR_FLAG; +} + +static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8169_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(CSIDR) : ~0; +} + +static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 
+ RTL_R32(CSIDR) : ~0; +} + +static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC2); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); } -static void rtl_csi_access_enable_1(void __iomem *ioaddr) +static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) { - rtl_csi_access_enable(ioaddr, 0x17000000); + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(CSIDR) : ~0; } -static void rtl_csi_access_enable_2(void __iomem *ioaddr) +static void rtl_init_csi_ops(struct rtl8169_private *tp) { - rtl_csi_access_enable(ioaddr, 0x27000000); + struct csi_ops *ops = &tp->csi_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + ops->write = NULL; + ops->read = NULL; + break; + + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + ops->write = r8402_csi_write; + ops->read = r8402_csi_read; + break; + + case RTL_GIGA_MAC_VER_44: + ops->write = r8411_csi_write; + ops->read = r8411_csi_read; + break; + + default: + ops->write = r8169_csi_write; + ops->read = r8169_csi_read; + break; + } } struct ephy_info { @@ -4533,41 +4876,43 @@ struct ephy_info { u16 bits; }; -static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len) +static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e, + int len) { u16 w; while (len-- > 0) { - w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits; - rtl_ephy_write(ioaddr, e->offset, w); + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); e++; } } static void rtl_disable_clock_request(struct pci_dev *pdev) { - int cap = pci_pcie_cap(pdev); - - if (cap) { - u16 ctl; - - pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); - ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); - } + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); } static void rtl_enable_clock_request(struct pci_dev *pdev) { - int cap = pci_pcie_cap(pdev); + pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} - if (cap) { - u16 ctl; +static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) +{ + void __iomem *ioaddr = tp->mmio_addr; + u8 data; - pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); - ctl |= PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); - } + data = RTL_R8(Config3); + + if (enable) + data |= Rdy_to_L23; + else + data &= ~Rdy_to_L23; + + RTL_W8(Config3, data); } #define R8168_CPCMD_QUIRK_MASK (\ @@ -4581,39 +4926,50 @@ static void rtl_enable_clock_request(struct pci_dev *pdev) PktCntrDisable | \ Mac_dbgo_sel) -static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { + void __iomem *ioaddr 
= tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); - rtl_tx_performance_tweak(pdev, - (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); + if (tp->dev->mtu <= ETH_DATA_LEN) { + rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) | + PCI_EXP_DEVCTL_NOSNOOP_EN); + } } -static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168bef(struct rtl8169_private *tp) { - rtl_hw_start_8168bb(ioaddr, pdev); + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_start_8168bb(tp); RTL_W8(MaxTxPacketSize, TxPacketMax); RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); } -static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev) +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + RTL_W8(Config1, RTL_R8(Config1) | Speed_down); RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); rtl_disable_clock_request(pdev); RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } -static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) { static const struct ephy_info e_info_8168cp[] = { { 0x01, 0, 0x0001 }, @@ -4623,27 +4979,34 @@ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) { 0x07, 0, 0x2000 } }; - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); - rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); + rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); - __rtl_hw_start_8168cp(ioaddr, pdev); + __rtl_hw_start_8168cp(tp); } -static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } -static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); @@ -4652,80 +5015,92 @@ static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(MaxTxPacketSize, TxPacketMax); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } -static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168c_1[] = { { 0x02, 0x0800, 0x1000 }, { 0x03, 0, 0x0002 }, { 0x06, 0x0080, 0x0000 } }; - rtl_csi_access_enable_2(ioaddr); + 
rtl_csi_access_enable_2(tp); RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); - rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); + rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); - __rtl_hw_start_8168cp(ioaddr, pdev); + __rtl_hw_start_8168cp(tp); } -static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) { static const struct ephy_info e_info_8168c_2[] = { { 0x01, 0, 0x0001 }, { 0x03, 0x0400, 0x0220 } }; - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); - rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); + rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); - __rtl_hw_start_8168cp(ioaddr, pdev); + __rtl_hw_start_8168cp(tp); } -static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) { - rtl_hw_start_8168c_2(ioaddr, pdev); + rtl_hw_start_8168c_2(tp); } -static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); - __rtl_hw_start_8168cp(ioaddr, pdev); + __rtl_hw_start_8168cp(tp); } -static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168d(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); rtl_disable_clock_request(pdev); RTL_W8(MaxTxPacketSize, TxPacketMax); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } -static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168dp(struct rtl8169_private *tp) { - rtl_csi_access_enable_1(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_csi_access_enable_1(tp); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W8(MaxTxPacketSize, TxPacketMax); rtl_disable_clock_request(pdev); } -static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168d_4[] = { { 0x0b, ~0, 0x48 }, { 0x19, 0x20, 0x50 }, @@ -4733,7 +5108,7 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) }; int i; - rtl_csi_access_enable_1(ioaddr); + rtl_csi_access_enable_1(tp); rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); @@ -4743,15 +5118,17 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) const struct ephy_info *e = e_info_8168d_4 + i; u16 w; - w = rtl_ephy_read(ioaddr, e->offset); - rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits); + w = rtl_ephy_read(tp, e->offset); + rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits); } rtl_enable_clock_request(pdev); } -static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; static const 
struct ephy_info e_info_8168e_1[] = { { 0x00, 0x0200, 0x0100 }, { 0x00, 0x0000, 0x0004 }, @@ -4768,11 +5145,12 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) { 0x0a, 0x0000, 0x0040 } }; - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); - rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); + rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W8(MaxTxPacketSize, TxPacketMax); @@ -4785,28 +5163,30 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); } -static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8168e_2[] = { { 0x09, 0x0000, 0x0080 }, { 0x19, 0x0000, 0x0224 } }; - rtl_csi_access_enable_1(ioaddr); + rtl_csi_access_enable_1(tp); - rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); + rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); - rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); - rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); RTL_W8(MaxTxPacketSize, EarlySize); @@ -4823,8 +5203,40 @@ static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); } -static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x1b0, 
ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8168f_1[] = { { 0x06, 0x00c0, 0x0020 }, { 0x08, 0x0001, 0x0002 }, @@ -4832,45 +5244,110 @@ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) { 0x19, 0x0000, 0x0224 } }; - rtl_csi_access_enable_1(ioaddr); + rtl_hw_start_8168f(tp); - rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); +} - rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); - rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); - rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, - ERIAR_EXGMAC); +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x1e, 0x0000, 0x4000 }, + { 0x19, 0x0000, 0x0224 } + }; - RTL_W8(MaxTxPacketSize, EarlySize); + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_enable(tp, false); - rtl_disable_clock_request(pdev); + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); - RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + 
RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); /* Adjust EEE LED frequency */ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); - RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); - RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); - RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); + rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20eb } + }; + + rtl_hw_start_8168g_1(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0020, 0x0000 }, + { 0x1e, 0x0000, 0x2000 } + }; + + rtl_hw_start_8168g_1(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); } static void rtl_hw_start_8168(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; RTL_W8(Cfg9346, Cfg9346_Unlock); @@ -4885,84 +5362,96 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W16(IntrMitigate, 0x5151); /* Work around for RxFIFO overflow. 
*/ - if (tp->mac_version == RTL_GIGA_MAC_VER_11 || - tp->mac_version == RTL_GIGA_MAC_VER_22) { - tp->intr_event |= RxFIFOOver | PCSTimeout; - tp->intr_event &= ~RxOverflow; + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + tp->event_slow |= RxFIFOOver | PCSTimeout; + tp->event_slow &= ~RxOverflow; } rtl_set_rx_tx_desc_registers(tp, ioaddr); - rtl_set_rx_mode(dev); - - RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | - (InterFrameGap << TxInterFrameGapShift)); + rtl_set_rx_tx_config_registers(tp); RTL_R8(IntrMask); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_11: - rtl_hw_start_8168bb(ioaddr, pdev); + rtl_hw_start_8168bb(tp); break; case RTL_GIGA_MAC_VER_12: case RTL_GIGA_MAC_VER_17: - rtl_hw_start_8168bef(ioaddr, pdev); + rtl_hw_start_8168bef(tp); break; case RTL_GIGA_MAC_VER_18: - rtl_hw_start_8168cp_1(ioaddr, pdev); + rtl_hw_start_8168cp_1(tp); break; case RTL_GIGA_MAC_VER_19: - rtl_hw_start_8168c_1(ioaddr, pdev); + rtl_hw_start_8168c_1(tp); break; case RTL_GIGA_MAC_VER_20: - rtl_hw_start_8168c_2(ioaddr, pdev); + rtl_hw_start_8168c_2(tp); break; case RTL_GIGA_MAC_VER_21: - rtl_hw_start_8168c_3(ioaddr, pdev); + rtl_hw_start_8168c_3(tp); break; case RTL_GIGA_MAC_VER_22: - rtl_hw_start_8168c_4(ioaddr, pdev); + rtl_hw_start_8168c_4(tp); break; case RTL_GIGA_MAC_VER_23: - rtl_hw_start_8168cp_2(ioaddr, pdev); + rtl_hw_start_8168cp_2(tp); break; case RTL_GIGA_MAC_VER_24: - rtl_hw_start_8168cp_3(ioaddr, pdev); + rtl_hw_start_8168cp_3(tp); break; case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_27: - rtl_hw_start_8168d(ioaddr, pdev); + rtl_hw_start_8168d(tp); break; case RTL_GIGA_MAC_VER_28: - rtl_hw_start_8168d_4(ioaddr, pdev); + rtl_hw_start_8168d_4(tp); break; case RTL_GIGA_MAC_VER_31: - rtl_hw_start_8168dp(ioaddr, pdev); + rtl_hw_start_8168dp(tp); break; case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: - rtl_hw_start_8168e_1(ioaddr, pdev); + rtl_hw_start_8168e_1(tp); break; case RTL_GIGA_MAC_VER_34: - rtl_hw_start_8168e_2(ioaddr, pdev); + rtl_hw_start_8168e_2(tp); break; case RTL_GIGA_MAC_VER_35: case RTL_GIGA_MAC_VER_36: - rtl_hw_start_8168f_1(ioaddr, pdev); + rtl_hw_start_8168f_1(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl_hw_start_8411(tp); + break; + + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_hw_start_8168g_1(tp); + break; + case RTL_GIGA_MAC_VER_42: + rtl_hw_start_8168g_2(tp); + break; + + case RTL_GIGA_MAC_VER_44: + rtl_hw_start_8411_2(tp); break; default: @@ -4971,13 +5460,13 @@ static void rtl_hw_start_8168(struct net_device *dev) break; } + RTL_W8(Cfg9346, Cfg9346_Lock); + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); - RTL_W8(Cfg9346, Cfg9346_Lock); + rtl_set_rx_mode(dev); RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); - - RTL_W16(IntrMask, tp->intr_event); } #define R810X_CPCMD_QUIRK_MASK (\ @@ -4991,8 +5480,10 @@ static void rtl_hw_start_8168(struct net_device *dev) PktCntrDisable | \ Mac_dbgo_sel) -static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; static const struct ephy_info e_info_8102e_1[] = { { 0x01, 0, 0x6e65 }, { 0x02, 0, 0x091f }, @@ -5005,7 +5496,7 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) }; u8 cfg1; - rtl_csi_access_enable_2(ioaddr); + rtl_csi_access_enable_2(tp); RTL_W8(DBG_REG, FIX_NAK_1); @@ -5019,12 +5510,15 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) if ((cfg1 & LEDS0) && 
(cfg1 & LEDS1)) RTL_W8(Config1, cfg1 & ~LEDS0); - rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); + rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); } -static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(ioaddr); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); @@ -5032,15 +5526,16 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); } -static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) { - rtl_hw_start_8102e_2(ioaddr, pdev); + rtl_hw_start_8102e_2(tp); - rtl_ephy_write(ioaddr, 0x03, 0xc2f9); + rtl_ephy_write(tp, 0x03, 0xc2f9); } -static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; static const struct ephy_info e_info_8105e_1[] = { { 0x07, 0, 0x4000 }, { 0x19, 0, 0x0200 }, @@ -5061,13 +5556,60 @@ static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); - rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); + rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_csi_access_enable_2(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); + + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); } -static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev) +static void rtl_hw_start_8106(struct rtl8169_private *tp) { - rtl_hw_start_8105e_1(ioaddr, pdev); - rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); + void __iomem *ioaddr = tp->mmio_addr; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + + rtl_pcie_state_l2l3_enable(tp, false); } static void rtl_hw_start_8101(struct net_device 
*dev) @@ -5076,62 +5618,70 @@ static void rtl_hw_start_8101(struct net_device *dev) void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; - if (tp->mac_version == RTL_GIGA_MAC_VER_13 || - tp->mac_version == RTL_GIGA_MAC_VER_16) { - int cap = pci_pcie_cap(pdev); + if (tp->mac_version >= RTL_GIGA_MAC_VER_30) + tp->event_slow &= ~RxFIFOOver; - if (cap) { - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_NOSNOOP_EN); - } - } + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || + tp->mac_version == RTL_GIGA_MAC_VER_16) + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN); RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; + RTL_W16(CPlusCmd, tp->cp_cmd); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + rtl_set_rx_tx_config_registers(tp); + switch (tp->mac_version) { case RTL_GIGA_MAC_VER_07: - rtl_hw_start_8102e_1(ioaddr, pdev); + rtl_hw_start_8102e_1(tp); break; case RTL_GIGA_MAC_VER_08: - rtl_hw_start_8102e_3(ioaddr, pdev); + rtl_hw_start_8102e_3(tp); break; case RTL_GIGA_MAC_VER_09: - rtl_hw_start_8102e_2(ioaddr, pdev); + rtl_hw_start_8102e_2(tp); break; case RTL_GIGA_MAC_VER_29: - rtl_hw_start_8105e_1(ioaddr, pdev); + rtl_hw_start_8105e_1(tp); break; case RTL_GIGA_MAC_VER_30: - rtl_hw_start_8105e_2(ioaddr, pdev); + rtl_hw_start_8105e_2(tp); break; - } - - RTL_W8(Cfg9346, Cfg9346_Lock); - RTL_W8(MaxTxPacketSize, TxPacketMax); + case RTL_GIGA_MAC_VER_37: + rtl_hw_start_8402(tp); + break; - rtl_set_rx_max_size(ioaddr, rx_buf_sz); + case RTL_GIGA_MAC_VER_39: + rtl_hw_start_8106(tp); + break; + case RTL_GIGA_MAC_VER_43: + rtl_hw_start_8168g_2(tp); + break; + } - tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; - RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_W8(Cfg9346, Cfg9346_Lock); RTL_W16(IntrMitigate, 0x0000); - rtl_set_rx_tx_desc_registers(tp, ioaddr); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_set_rx_tx_config_registers(tp); - - RTL_R8(IntrMask); rtl_set_rx_mode(dev); - RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); + RTL_R8(IntrMask); - RTL_W16(IntrMask, tp->intr_event); + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); } static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) @@ -5311,7 +5861,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, tp->TxDescArray + entry); if (skb) { tp->dev->stats.tx_dropped++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); tx_skb->skb = NULL; } } @@ -5324,95 +5874,34 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) tp->cur_tx = tp->dirty_tx = 0; } -static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) +static void rtl_reset_work(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - - PREPARE_DELAYED_WORK(&tp->task, task); - schedule_delayed_work(&tp->task, 4); -} - -static void rtl8169_wait_for_quiescence(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; - - synchronize_irq(dev->irq); - - /* Wait for any pending NAPI task to complete */ - napi_disable(&tp->napi); - - rtl8169_irq_mask_and_ack(ioaddr); - - tp->intr_mask = 0xffff; - RTL_W16(IntrMask, tp->intr_event); - napi_enable(&tp->napi); -} - -static void rtl8169_reinit_task(struct work_struct *work) -{ - struct rtl8169_private *tp = - container_of(work, struct rtl8169_private, task.work); - struct net_device *dev = tp->dev; - int ret; - - rtnl_lock(); - - if (!netif_running(dev)) - goto 
out_unlock; - - rtl8169_wait_for_quiescence(dev); - rtl8169_close(dev); - - ret = rtl8169_open(dev); - if (unlikely(ret < 0)) { - if (net_ratelimit()) - netif_err(tp, drv, dev, - "reinit failure (status = %d). Rescheduling\n", - ret); - rtl8169_schedule_work(dev, rtl8169_reinit_task); - } - -out_unlock: - rtnl_unlock(); -} - -static void rtl8169_reset_task(struct work_struct *work) -{ - struct rtl8169_private *tp = - container_of(work, struct rtl8169_private, task.work); struct net_device *dev = tp->dev; int i; - rtnl_lock(); - - if (!netif_running(dev)) - goto out_unlock; + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_sched(); - rtl8169_wait_for_quiescence(dev); + rtl8169_hw_reset(tp); for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); - rtl8169_hw_reset(tp); + napi_enable(&tp->napi); rtl_hw_start(dev); netif_wake_queue(dev); rtl8169_check_link_status(dev, tp, tp->mmio_addr); - -out_unlock: - rtnl_unlock(); } static void rtl8169_tx_timeout(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - rtl8169_hw_reset(tp); - - /* Let's wait a bit while any (async) irq lands on */ - rtl8169_schedule_work(dev, rtl8169_reset_task); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, @@ -5466,7 +5955,20 @@ err_out: return -EIO; } -static inline void rtl8169_tso_csum(struct rtl8169_private *tp, +static bool rtl_skb_pad(struct sk_buff *skb) +{ + if (skb_padto(skb, ETH_ZLEN)) + return false; + skb_put(skb, ETH_ZLEN - skb->len); + return true; +} + +static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) +{ + return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; +} + +static inline bool rtl8169_tso_csum(struct rtl8169_private *tp, struct sk_buff *skb, u32 *opts) { const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version; @@ -5479,13 +5981,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp, } else if (skb->ip_summed == CHECKSUM_PARTIAL) { const struct iphdr *ip = ip_hdr(skb); + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb); + if (ip->protocol == IPPROTO_TCP) opts[offset] |= info->checksum.tcp; else if (ip->protocol == IPPROTO_UDP) opts[offset] |= info->checksum.udp; else WARN_ON_ONCE(1); + } else { + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return rtl_skb_pad(skb); } + return true; } static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, @@ -5501,7 +6010,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, u32 opts[2]; int frags; - if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { + if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { netif_err(tp, drv, dev, "BUG! 
Tx Ring full when queue awake!\n"); goto err_stop_0; } @@ -5509,6 +6018,12 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) goto err_stop_0; + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); + opts[0] = DescOwn; + + if (!rtl8169_tso_csum(tp, skb, opts)) + goto err_update_stats; + len = skb_headlen(skb); mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(d, mapping))) { @@ -5520,11 +6035,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->tx_skb[entry].len = len; txd->addr = cpu_to_le64(mapping); - opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb)); - opts[0] = DescOwn; - - rtl8169_tso_csum(tp, skb, opts); - frags = rtl8169_xmit_frags(tp, skb, opts); if (frags < 0) goto err_dma_1; @@ -5537,6 +6047,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); + skb_tx_timestamp(skb); + wmb(); /* Anti gcc 2.95.3 bugware (sic) */ @@ -5549,10 +6061,23 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, RTL_W8(TxPoll, NPQ); - if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { + mmiowb(); + + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); netif_stop_queue(dev); - smp_rmb(); - if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. + */ + smp_mb(); + if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) netif_wake_queue(dev); } @@ -5561,7 +6086,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, err_dma_1: rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); err_dma_0: - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); +err_update_stats: dev->stats.tx_dropped++; return NETDEV_TX_OK; @@ -5604,7 +6130,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); /* The infamous DAC f*ckup only happens at boot time */ - if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) { + if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) { void __iomem *ioaddr = tp->mmio_addr; netif_info(tp, intr, dev, "disabling PCI DAC\n"); @@ -5615,12 +6141,10 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) rtl8169_hw_reset(tp); - rtl8169_schedule_work(dev, rtl8169_reinit_task); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } -static void rtl8169_tx_interrupt(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr) +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) { unsigned int dirty_tx, tx_left; @@ -5641,9 +6165,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev, rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, tp->TxDescArray + entry); if (status & LastFrag) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += tx_skb->skb->len; - dev_kfree_skb(tx_skb->skb); + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += tx_skb->skb->len; + u64_stats_update_end(&tp->tx_stats.syncp); + dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; } dirty_tx++; @@ -5652,9 +6178,16 @@ static void rtl8169_tx_interrupt(struct net_device *dev, if (tp->dirty_tx != dirty_tx) { tp->dirty_tx = dirty_tx; - smp_wmb(); + /* Sync with 
rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_mb(); if (netif_queue_stopped(dev) && - (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { + TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { netif_wake_queue(dev); } /* @@ -5663,9 +6196,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev, * of start_xmit activity is detected (if it is not detected, * it is slow enough). -- FR */ - smp_rmb(); - if (tp->cur_tx != dirty_tx) + if (tp->cur_tx != dirty_tx) { + void __iomem *ioaddr = tp->mmio_addr; + RTL_W8(TxPoll, NPQ); + } } } @@ -5704,18 +6239,14 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data, return skb; } -static int rtl8169_rx_interrupt(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr, u32 budget) +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) { unsigned int cur_rx, rx_left; unsigned int count; cur_rx = tp->cur_rx; - rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; - rx_left = min(rx_left, budget); - for (; rx_left > 0; rx_left--, cur_rx++) { + for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { unsigned int entry = cur_rx % NUM_RX_DESC; struct RxDesc *desc = tp->RxDescArray + entry; u32 status; @@ -5734,14 +6265,24 @@ static int rtl8169_rx_interrupt(struct net_device *dev, if (status & RxCRC) dev->stats.rx_crc_errors++; if (status & RxFOVF) { - rtl8169_schedule_work(dev, rtl8169_reset_task); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); dev->stats.rx_fifo_errors++; } - rtl8169_mark_to_asic(desc, rx_buf_sz); + if ((status & (RxRUNT | RxCRC)) && + !(status & (RxRWT | RxFOVF)) && + (dev->features & NETIF_F_RXALL)) + goto process_pkt; } else { struct sk_buff *skb; - dma_addr_t addr = le64_to_cpu(desc->addr); - int pkt_size = (status & 0x00003fff) - 4; + dma_addr_t addr; + int pkt_size; + +process_pkt: + addr = le64_to_cpu(desc->addr); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = (status & 0x00003fff) - 4; + else + pkt_size = status & 0x00003fff; /* * The driver does not support incoming fragmented @@ -5751,16 +6292,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev, if (unlikely(rtl8169_fragmented_frame(status))) { dev->stats.rx_dropped++; dev->stats.rx_length_errors++; - rtl8169_mark_to_asic(desc, rx_buf_sz); - continue; + goto release_descriptor; } skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], tp, pkt_size, addr); - rtl8169_mark_to_asic(desc, rx_buf_sz); if (!skb) { dev->stats.rx_dropped++; - continue; + goto release_descriptor; } rtl8169_rx_csum(skb, status); @@ -5771,23 +6310,20 @@ static int rtl8169_rx_interrupt(struct net_device *dev, napi_gro_receive(&tp->napi, skb); - dev->stats.rx_bytes += pkt_size; - dev->stats.rx_packets++; - } - - /* Work around for AMD plateform. 
*/ - if ((desc->opts2 & cpu_to_le32(0xfffe000)) && - (tp->mac_version == RTL_GIGA_MAC_VER_05)) { - desc->opts2 = 0; - cur_rx++; + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); } +release_descriptor: + desc->opts2 = 0; + wmb(); + rtl8169_mark_to_asic(desc, rx_buf_sz); } count = cur_rx - tp->cur_rx; tp->cur_rx = cur_rx; - tp->dirty_rx += count; - return count; } @@ -5795,115 +6331,116 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; int handled = 0; - int status; - - /* loop handling interrupts until we have no new ones or - * we hit a invalid/hotplug case. - */ - status = RTL_R16(IntrStatus); - while (status && status != 0xffff) { - handled = 1; + u16 status; - /* Handle all of the error cases first. These will reset - * the chip, so just exit the loop. - */ - if (unlikely(!netif_running(dev))) { - rtl8169_hw_reset(tp); - break; - } + status = rtl_get_events(tp); + if (status && status != 0xffff) { + status &= RTL_EVENT_NAPI | tp->event_slow; + if (status) { + handled = 1; - if (unlikely(status & RxFIFOOver)) { - switch (tp->mac_version) { - /* Work around for rx fifo overflow */ - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_26: - netif_stop_queue(dev); - rtl8169_tx_timeout(dev); - goto done; - /* Testers needed. */ - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - /* Experimental science. Pktgen proof. */ - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_25: - if (status == RxFIFOOver) - goto done; - break; - default: - break; - } + rtl_irq_disable(tp); + napi_schedule(&tp->napi); } + } + return IRQ_RETVAL(handled); +} - if (unlikely(status & SYSErr)) { - rtl8169_pcierr_interrupt(dev); +/* + * Workqueue context. + */ +static void rtl_slow_event_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u16 status; + + status = rtl_get_events(tp) & tp->event_slow; + rtl_ack_events(tp, status); + + if (unlikely(status & RxFIFOOver)) { + switch (tp->mac_version) { + /* Work around for rx fifo overflow */ + case RTL_GIGA_MAC_VER_11: + netif_stop_queue(dev); + /* XXX - Hack alert. See rtl_task(). */ + set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); + default: break; } + } - if (status & LinkChg) - __rtl8169_check_link_status(dev, tp, ioaddr, true); + if (unlikely(status & SYSErr)) + rtl8169_pcierr_interrupt(dev); - /* We need to see the lastest version of tp->intr_mask to - * avoid ignoring an MSI interrupt and having to wait for - * another event which may never come. - */ - smp_rmb(); - if (status & tp->intr_mask & tp->napi_event) { - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); - tp->intr_mask = ~tp->napi_event; + if (status & LinkChg) + __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); - if (likely(napi_schedule_prep(&tp->napi))) - __napi_schedule(&tp->napi); - else - netif_info(tp, intr, dev, - "interrupt %04x in poll\n", status); - } + rtl_irq_enable_all(tp); +} - /* We only get a new MSI interrupt when all active irq - * sources on the chip have been acknowledged. 
So, ack - * everything we've seen and check if new sources have become - * active to avoid blocking all interrupts from the chip. - */ - RTL_W16(IntrStatus, - (status & RxFIFOOver) ? (status | RxOverflow) : status); - status = RTL_R16(IntrStatus); +static void rtl_task(struct work_struct *work) +{ + static const struct { + int bitnr; + void (*action)(struct rtl8169_private *); + } rtl_work[] = { + /* XXX - keep rtl_slow_event_work() as first element. */ + { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, + { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, + { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } + }; + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + struct net_device *dev = tp->dev; + int i; + + rtl_lock_work(tp); + + if (!netif_running(dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(rtl_work); i++) { + bool pending; + + pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags); + if (pending) + rtl_work[i].action(tp); } -done: - return IRQ_RETVAL(handled); + +out_unlock: + rtl_unlock_work(tp); } static int rtl8169_poll(struct napi_struct *napi, int budget) { struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); struct net_device *dev = tp->dev; - void __iomem *ioaddr = tp->mmio_addr; - int work_done; + u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; + int work_done= 0; + u16 status; + + status = rtl_get_events(tp); + rtl_ack_events(tp, status & ~tp->event_slow); + + if (status & RTL_EVENT_NAPI_RX) + work_done = rtl_rx(dev, tp, (u32) budget); + + if (status & RTL_EVENT_NAPI_TX) + rtl_tx(dev, tp); + + if (status & tp->event_slow) { + enable_mask &= ~tp->event_slow; - work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget); - rtl8169_tx_interrupt(dev, tp, ioaddr); + rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING); + } if (work_done < budget) { napi_complete(napi); - /* We need for force the visibility of tp->intr_mask - * for other CPUs, as we can loose an MSI interrupt - * and potentially wait for a retransmit timeout if we don't. - * The posted write to IntrMask is safe, as it will - * eventually make it to the chip and we won't loose anything - * until it does. - */ - tp->intr_mask = 0xffff; - wmb(); - RTL_W16(IntrMask, tp->intr_event); + rtl_irq_enable(tp, enable_mask); + mmiowb(); } return work_done; @@ -5927,26 +6464,19 @@ static void rtl8169_down(struct net_device *dev) del_timer_sync(&tp->timer); - netif_stop_queue(dev); - napi_disable(&tp->napi); - - spin_lock_irq(&tp->lock); + netif_stop_queue(dev); rtl8169_hw_reset(tp); /* * At this point device interrupts can not be enabled in any function, - * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task, - * rtl8169_reinit_task) and napi is disabled (rtl8169_poll). + * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task) + * and napi is disabled (rtl8169_poll). */ rtl8169_rx_missed(dev, ioaddr); - spin_unlock_irq(&tp->lock); - - synchronize_irq(dev->irq); - /* Give a racing hard_start_xmit a few cycles to complete. */ - synchronize_sched(); /* FIXME: should this be synchronize_irq()? 
*/ + synchronize_sched(); rtl8169_tx_clear(tp); @@ -5965,9 +6495,15 @@ static int rtl8169_close(struct net_device *dev) /* Update counters before going down */ rtl8169_update_counters(dev); + rtl_lock_work(tp); + clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl8169_down(dev); + rtl_unlock_work(tp); + + cancel_work_sync(&tp->wk.work); - free_irq(dev->irq, dev); + free_irq(pdev->irq, dev); dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); @@ -5981,77 +6517,127 @@ static int rtl8169_close(struct net_device *dev) return 0; } -static void rtl_set_rx_mode(struct net_device *dev) +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(tp->pci_dev->irq, dev); +} +#endif + +static int rtl_open(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; - unsigned long flags; - u32 mc_filter[2]; /* Multicast hash filter */ - int rx_mode; - u32 tmp = 0; + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; - if (dev->flags & IFF_PROMISC) { - /* Unconditionally log net taps. */ - netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); - rx_mode = - AcceptBroadcast | AcceptMulticast | AcceptMyPhys | - AcceptAllPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; - } else if ((netdev_mc_count(dev) > multicast_filter_limit) || - (dev->flags & IFF_ALLMULTI)) { - /* Too many to filter perfectly -- accept all multicasts. */ - rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; - } else { - struct netdev_hw_addr *ha; + pm_runtime_get_sync(&pdev->dev); - rx_mode = AcceptBroadcast | AcceptMyPhys; - mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(ha, dev) { - int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - rx_mode |= AcceptMulticast; - } - } + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; - spin_lock_irqsave(&tp->lock, flags); + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; - tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + retval = rtl8169_init_ring(dev); + if (retval < 0) + goto err_free_rx_1; - if (tp->mac_version > RTL_GIGA_MAC_VER_06) { - u32 data = mc_filter[0]; + INIT_WORK(&tp->wk.work, rtl_task); - mc_filter[0] = swab32(mc_filter[1]); - mc_filter[1] = swab32(data); - } + smp_mb(); - RTL_W32(MAR0 + 4, mc_filter[1]); - RTL_W32(MAR0 + 0, mc_filter[0]); + rtl_request_firmware(tp); - RTL_W32(RxConfig, tmp); + retval = request_irq(pdev->irq, rtl8169_interrupt, + (tp->features & RTL_FEATURE_MSI) ? 
0 : IRQF_SHARED, + dev->name, dev); + if (retval < 0) + goto err_release_fw_2; + + rtl_lock_work(tp); + + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + + napi_enable(&tp->napi); + + rtl8169_init_phy(dev, tp); + + __rtl8169_set_features(dev, dev->features); + + rtl_pll_power_up(tp); + + rtl_hw_start(dev); + + netif_start_queue(dev); + + rtl_unlock_work(tp); + + tp->saved_wolopts = 0; + pm_runtime_put_noidle(&pdev->dev); + + rtl8169_check_link_status(dev, tp, ioaddr); +out: + return retval; - spin_unlock_irqrestore(&tp->lock, flags); +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; } -/** - * rtl8169_get_stats - Get rtl8169 read/write statistics - * @dev: The Ethernet Device to get statistics for - * - * Get TX/RX statistics for rtl8169 - */ -static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 * +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; - unsigned long flags; + unsigned int start; - if (netif_running(dev)) { - spin_lock_irqsave(&tp->lock, flags); + if (netif_running(dev)) rtl8169_rx_missed(dev, ioaddr); - spin_unlock_irqrestore(&tp->lock, flags); - } - return &dev->stats; + do { + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); + + + do { + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); + + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; + stats->rx_length_errors = dev->stats.rx_length_errors; + stats->rx_errors = dev->stats.rx_errors; + stats->rx_crc_errors = dev->stats.rx_crc_errors; + stats->rx_fifo_errors = dev->stats.rx_fifo_errors; + stats->rx_missed_errors = dev->stats.rx_missed_errors; + + return stats; } static void rtl8169_net_suspend(struct net_device *dev) @@ -6061,10 +6647,15 @@ static void rtl8169_net_suspend(struct net_device *dev) if (!netif_running(dev)) return; - rtl_pll_power_down(tp); - netif_device_detach(dev); netif_stop_queue(dev); + + rtl_lock_work(tp); + napi_disable(&tp->napi); + clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_unlock_work(tp); + + rtl_pll_power_down(tp); } #ifdef CONFIG_PM @@ -6087,7 +6678,12 @@ static void __rtl8169_resume(struct net_device *dev) rtl_pll_power_up(tp); - rtl8169_schedule_work(dev, rtl8169_reset_task); + rtl_lock_work(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_unlock_work(tp); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } static int rtl8169_resume(struct device *device) @@ -6113,10 +6709,10 @@ static int rtl8169_runtime_suspend(struct device *device) if (!tp->TxDescArray) return 0; - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); tp->saved_wolopts = __rtl8169_get_wol(tp); __rtl8169_set_wol(tp, WAKE_ANY); - spin_unlock_irq(&tp->lock); + rtl_unlock_work(tp); rtl8169_net_suspend(dev); @@ -6132,10 
+6728,10 @@ static int rtl8169_runtime_resume(struct device *device) if (!tp->TxDescArray) return 0; - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); __rtl8169_set_wol(tp, tp->saved_wolopts); tp->saved_wolopts = 0; - spin_unlock_irq(&tp->lock); + rtl_unlock_work(tp); rtl8169_init_phy(dev, tp); @@ -6197,18 +6793,17 @@ static void rtl_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &pdev->dev; + + pm_runtime_get_sync(d); rtl8169_net_suspend(dev); /* Restore original MAC address */ rtl_rar_set(tp, dev->perm_addr); - spin_lock_irq(&tp->lock); - rtl8169_hw_reset(tp); - spin_unlock_irq(&tp->lock); - if (system_state == SYSTEM_POWER_OFF) { if (__rtl8169_get_wol(tp) & WAKE_ANY) { rtl_wol_suspend_quirk(tp); @@ -6218,26 +6813,433 @@ static void rtl_shutdown(struct pci_dev *pdev) pci_wake_from_d3(pdev, true); pci_set_power_state(pdev, PCI_D3hot); } + + pm_runtime_put_noidle(d); } -static struct pci_driver rtl8169_pci_driver = { - .name = MODULENAME, - .id_table = rtl8169_pci_tbl, - .probe = rtl8169_init_one, - .remove = __devexit_p(rtl8169_remove_one), - .shutdown = rtl_shutdown, - .driver.pm = RTL8169_PM_OPS, +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_stop(tp); + } + + netif_napi_del(&tp->napi); + + unregister_netdev(dev); + + rtl_release_firmware(tp); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl_disable_msi(pdev, tp); + rtl8169_release_board(pdev, dev, tp->mmio_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = rtl8169_ioctl, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static const struct rtl_cfg_info { + void (*hw_start)(struct net_device *); + unsigned int region; + unsigned int align; + u16 event_slow; + unsigned features; + u8 default_ver; +} rtl_cfg_infos [] = { + [RTL_CFG_0] = { + .hw_start = rtl_hw_start_8169, + .region = 1, + .align = 0, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, + .features = RTL_FEATURE_GMII, + .default_ver = RTL_GIGA_MAC_VER_01, + }, + [RTL_CFG_1] = { + .hw_start = rtl_hw_start_8168, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow, + .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_11, + }, + [RTL_CFG_2] = { + .hw_start = rtl_hw_start_8101, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | + PCSTimeout, + .features = RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_13, + } }; -static int __init rtl8169_init_module(void) +/* Cfg9346_Unlock assumed. 
*/ +static unsigned rtl_try_msi(struct rtl8169_private *tp, + const struct rtl_cfg_info *cfg) +{ + void __iomem *ioaddr = tp->mmio_addr; + unsigned msi = 0; + u8 cfg2; + + cfg2 = RTL_R8(Config2) & ~MSIEnable; + if (cfg->features & RTL_FEATURE_MSI) { + if (pci_enable_msi(tp->pci_dev)) { + netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n"); + } else { + cfg2 |= MSIEnable; + msi = RTL_FEATURE_MSI; + } + } + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + RTL_W8(Config2, cfg2); + return msi; +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) { - return pci_register_driver(&rtl8169_pci_driver); + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(MCU) & LINK_LIST_RDY; } -static void __exit rtl8169_cleanup_module(void) +DECLARE_RTL_COND(rtl_rxtx_empty_cond) { - pci_unregister_driver(&rtl8169_pci_driver); + void __iomem *ioaddr = tp->mmio_addr; + + return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u32 data; + + tp->ocp_base = OCP_STD_PHY_BASE; + + RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42)) + return; + + if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42)) + return; + + RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + data = r8168_mac_ocp_read(tp, 0xe8de); + data &= ~(1 << 14); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; + + data = r8168_mac_ocp_read(tp, 0xe8de); + data |= (1 << 15); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; } -module_init(rtl8169_init_module); -module_exit(rtl8169_cleanup_module); +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + rtl_hw_init_8168g(tp); + break; + + default: + break; + } +} + +static int +rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + const unsigned int region = cfg->region; + struct rtl8169_private *tp; + struct mii_if_info *mii; + struct net_device *dev; + void __iomem *ioaddr; + int chipset, i; + int rc; + + if (netif_msg_drv(&debug)) { + printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", + MODULENAME, RTL8169_VERSION); + } + + dev = alloc_etherdev(sizeof (*tp)); + if (!dev) { + rc = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); + + mii = &tp->mii; + mii->dev = dev; + mii->mdio_read = rtl_mdio_read; + mii->mdio_write = rtl_mdio_write; + mii->phy_id_mask = 0x1f; + mii->reg_num_mask = 0x1f; + mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device(pdev); + if (rc < 0) { + netif_err(tp, probe, dev, "enable failure\n"); + goto err_out_free_dev_1; + } + + if (pci_set_mwi(pdev) < 0) + netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { + netif_err(tp, probe, dev, + "region #%d not an MMIO resource, aborting\n", + region); + rc = -ENODEV; + goto err_out_mwi_2; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + netif_err(tp, probe, dev, + "Invalid PCI region size(s), aborting\n"); + rc = -ENODEV; + goto err_out_mwi_2; + } + + rc = pci_request_regions(pdev, MODULENAME); + if (rc < 0) { + netif_err(tp, probe, dev, "could not request regions\n"); + goto err_out_mwi_2; + } + + tp->cp_cmd = RxChkSum; + + if ((sizeof(dma_addr_t) > 4) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + netif_err(tp, probe, dev, "DMA configuration failed\n"); + goto err_out_free_res_3; + } + } + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); + if (!ioaddr) { + netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); + rc = -EIO; + goto err_out_free_res_3; + } + tp->mmio_addr = ioaddr; + + if (!pci_is_pcie(pdev)) + netif_info(tp, probe, dev, "not PCI Express\n"); + + /* Identify chip attached to board */ + rtl8169_get_mac_version(tp, dev, cfg->default_ver); + + rtl_init_rxcfg(tp); + + rtl_irq_disable(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rtl_ack_events(tp, 0xffff); + + pci_set_master(pdev); + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
+ */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + tp->cp_cmd |= RxVlan; + + rtl_init_mdio_ops(tp); + rtl_init_pll_power_ops(tp); + rtl_init_jumbo_ops(tp); + rtl_init_csi_ops(tp); + + rtl8169_print_mac_version(tp); + + chipset = tp->mac_version; + tp->txd_version = rtl_chip_infos[chipset].txd_version; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Config1, RTL_R8(Config1) | PMEnable); + RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); + if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) + tp->features |= RTL_FEATURE_WOL; + if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) + tp->features |= RTL_FEATURE_WOL; + tp->features |= rtl_try_msi(tp, cfg); + RTL_W8(Cfg9346, Cfg9346_Lock); + + if (rtl_tbi_enabled(tp)) { + tp->set_speed = rtl8169_set_speed_tbi; + tp->get_settings = rtl8169_gset_tbi; + tp->phy_reset_enable = rtl8169_tbi_reset_enable; + tp->phy_reset_pending = rtl8169_tbi_reset_pending; + tp->link_ok = rtl8169_tbi_link_ok; + tp->do_ioctl = rtl_tbi_ioctl; + } else { + tp->set_speed = rtl8169_set_speed_xmii; + tp->get_settings = rtl8169_gset_xmii; + tp->phy_reset_enable = rtl8169_xmii_reset_enable; + tp->phy_reset_pending = rtl8169_xmii_reset_pending; + tp->link_ok = rtl8169_xmii_link_ok; + tp->do_ioctl = rtl_xmii_ioctl; + } + + mutex_init(&tp->wk.mutex); + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + + /* Get MAC address */ + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = RTL_R8(MAC0 + i); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + dev->watchdog_timeo = RTL8169_TX_TIMEOUT; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); + + /* don't enable SG, IP_CSUM and TSO by default - it might not work + * properly for all devices */ + dev->features |= NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* 8110SCd requires hardware Rx VLAN - disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + tp->hw_start = cfg->hw_start; + tp->event_slow = cfg->event_slow; + + tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? + ~(RxBOVF | RxFOVF) : ~0; + + init_timer(&tp->timer); + tp->timer.data = (unsigned long) dev; + tp->timer.function = rtl8169_phy_timer; + + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + + rc = register_netdev(dev); + if (rc < 0) + goto err_out_msi_4; + + pci_set_drvdata(pdev, dev); + + netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", + rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, + (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); + if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { + netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " + "tx checksumming: %s]\n", + rtl_chip_infos[chipset].jumbo_max, + rtl_chip_infos[chipset].jumbo_tx_csum ? 
"ok" : "ko"); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_start(tp); + } + + device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + netif_carrier_off(dev); + +out: + return rc; + +err_out_msi_4: + netif_napi_del(&tp->napi); + rtl_disable_msi(pdev, tp); + iounmap(ioaddr); +err_out_free_res_3: + pci_release_regions(pdev); +err_out_mwi_2: + pci_clear_mwi(pdev); + pci_disable_device(pdev); +err_out_free_dev_1: + free_netdev(dev); + goto out; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = MODULENAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = RTL8169_PM_OPS, +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/drivers/net/ethernet/realtek/sc92031.c b/drivers/net/ethernet/realtek/sc92031.c deleted file mode 100644 index a284d644053..00000000000 --- a/drivers/net/ethernet/realtek/sc92031.c +++ /dev/null @@ -1,1609 +0,0 @@ -/* Silan SC92031 PCI Fast Ethernet Adapter driver - * - * Based on vendor drivers: - * Silan Fast Ethernet Netcard Driver: - * MODULE_AUTHOR ("gaoyonghong"); - * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver"); - * MODULE_LICENSE("GPL"); - * 8139D Fast Ethernet driver: - * (C) 2002 by gaoyonghong - * MODULE_AUTHOR ("gaoyonghong"); - * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver"); - * MODULE_LICENSE("GPL"); - * Both are almost identical and seem to be based on pci-skeleton.c - * - * Rewritten for 2.6 by Cesar Eduardo Barros - * - * A datasheet for this chip can be found at - * http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf - */ - -/* Note about set_mac_address: I don't know how to change the hardware - * matching, so you need to enable IFF_PROMISC when using it. - */ - -#include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/mii.h> -#include <linux/crc32.h> - -#include <asm/irq.h> - -#define SC92031_NAME "sc92031" - -/* BAR 0 is MMIO, BAR 1 is PIO */ -#ifndef SC92031_USE_BAR -#define SC92031_USE_BAR 0 -#endif - -/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */ -static int multicast_filter_limit = 64; -module_param(multicast_filter_limit, int, 0); -MODULE_PARM_DESC(multicast_filter_limit, - "Maximum number of filtered multicast addresses"); - -static int media; -module_param(media, int, 0); -MODULE_PARM_DESC(media, "Media type (0x00 = autodetect," - " 0x01 = 10M half, 0x02 = 10M full," - " 0x04 = 100M half, 0x08 = 100M full)"); - -/* Size of the in-memory receive ring. */ -#define RX_BUF_LEN_IDX 3 /* 0==8K, 1==16K, 2==32K, 3==64K ,4==128K*/ -#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX) - -/* Number of Tx descriptor registers. */ -#define NUM_TX_DESC 4 - -/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/ -#define MAX_ETH_FRAME_SIZE 1536 - -/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */ -#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE -#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) - -/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ -#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. 
*/ - -/* Time in jiffies before concluding the transmitter is hung. */ -#define TX_TIMEOUT (4*HZ) - -#define SILAN_STATS_NUM 2 /* number of ETHTOOL_GSTATS */ - -/* media options */ -#define AUTOSELECT 0x00 -#define M10_HALF 0x01 -#define M10_FULL 0x02 -#define M100_HALF 0x04 -#define M100_FULL 0x08 - - /* Symbolic offsets to registers. */ -enum silan_registers { - Config0 = 0x00, // Config0 - Config1 = 0x04, // Config1 - RxBufWPtr = 0x08, // Rx buffer writer poiter - IntrStatus = 0x0C, // Interrupt status - IntrMask = 0x10, // Interrupt mask - RxbufAddr = 0x14, // Rx buffer start address - RxBufRPtr = 0x18, // Rx buffer read pointer - Txstatusall = 0x1C, // Transmit status of all descriptors - TxStatus0 = 0x20, // Transmit status (Four 32bit registers). - TxAddr0 = 0x30, // Tx descriptors (also four 32bit). - RxConfig = 0x40, // Rx configuration - MAC0 = 0x44, // Ethernet hardware address. - MAR0 = 0x4C, // Multicast filter. - RxStatus0 = 0x54, // Rx status - TxConfig = 0x5C, // Tx configuration - PhyCtrl = 0x60, // physical control - FlowCtrlConfig = 0x64, // flow control - Miicmd0 = 0x68, // Mii command0 register - Miicmd1 = 0x6C, // Mii command1 register - Miistatus = 0x70, // Mii status register - Timercnt = 0x74, // Timer counter register - TimerIntr = 0x78, // Timer interrupt register - PMConfig = 0x7C, // Power Manager configuration - CRC0 = 0x80, // Power Manager CRC ( Two 32bit regisers) - Wakeup0 = 0x88, // power Manager wakeup( Eight 64bit regiser) - LSBCRC0 = 0xC8, // power Manager LSBCRC(Two 32bit regiser) - TestD0 = 0xD0, - TestD4 = 0xD4, - TestD8 = 0xD8, -}; - -#define MII_JAB 16 -#define MII_OutputStatus 24 - -#define PHY_16_JAB_ENB 0x1000 -#define PHY_16_PORT_ENB 0x1 - -enum IntrStatusBits { - LinkFail = 0x80000000, - LinkOK = 0x40000000, - TimeOut = 0x20000000, - RxOverflow = 0x0040, - RxOK = 0x0020, - TxOK = 0x0001, - IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK, -}; - -enum TxStatusBits { - TxCarrierLost = 0x20000000, - TxAborted = 0x10000000, - TxOutOfWindow = 0x08000000, - TxNccShift = 22, - EarlyTxThresShift = 16, - TxStatOK = 0x8000, - TxUnderrun = 0x4000, - TxOwn = 0x2000, -}; - -enum RxStatusBits { - RxStatesOK = 0x80000, - RxBadAlign = 0x40000, - RxHugeFrame = 0x20000, - RxSmallFrame = 0x10000, - RxCRCOK = 0x8000, - RxCrlFrame = 0x4000, - Rx_Broadcast = 0x2000, - Rx_Multicast = 0x1000, - RxAddrMatch = 0x0800, - MiiErr = 0x0400, -}; - -enum RxConfigBits { - RxFullDx = 0x80000000, - RxEnb = 0x40000000, - RxSmall = 0x20000000, - RxHuge = 0x10000000, - RxErr = 0x08000000, - RxAllphys = 0x04000000, - RxMulticast = 0x02000000, - RxBroadcast = 0x01000000, - RxLoopBack = (1 << 23) | (1 << 22), - LowThresholdShift = 12, - HighThresholdShift = 2, -}; - -enum TxConfigBits { - TxFullDx = 0x80000000, - TxEnb = 0x40000000, - TxEnbPad = 0x20000000, - TxEnbHuge = 0x10000000, - TxEnbFCS = 0x08000000, - TxNoBackOff = 0x04000000, - TxEnbPrem = 0x02000000, - TxCareLostCrs = 0x1000000, - TxExdCollNum = 0xf00000, - TxDataRate = 0x80000, -}; - -enum PhyCtrlconfigbits { - PhyCtrlAne = 0x80000000, - PhyCtrlSpd100 = 0x40000000, - PhyCtrlSpd10 = 0x20000000, - PhyCtrlPhyBaseAddr = 0x1f000000, - PhyCtrlDux = 0x800000, - PhyCtrlReset = 0x400000, -}; - -enum FlowCtrlConfigBits { - FlowCtrlFullDX = 0x80000000, - FlowCtrlEnb = 0x40000000, -}; - -enum Config0Bits { - Cfg0_Reset = 0x80000000, - Cfg0_Anaoff = 0x40000000, - Cfg0_LDPS = 0x20000000, -}; - -enum Config1Bits { - Cfg1_EarlyRx = 1 << 31, - Cfg1_EarlyTx = 1 << 30, - - //rx buffer size - Cfg1_Rcv8K = 0x0, - Cfg1_Rcv16K = 0x1, - 
Cfg1_Rcv32K = 0x3, - Cfg1_Rcv64K = 0x7, - Cfg1_Rcv128K = 0xf, -}; - -enum MiiCmd0Bits { - Mii_Divider = 0x20000000, - Mii_WRITE = 0x400000, - Mii_READ = 0x200000, - Mii_SCAN = 0x100000, - Mii_Tamod = 0x80000, - Mii_Drvmod = 0x40000, - Mii_mdc = 0x20000, - Mii_mdoen = 0x10000, - Mii_mdo = 0x8000, - Mii_mdi = 0x4000, -}; - -enum MiiStatusBits { - Mii_StatusBusy = 0x80000000, -}; - -enum PMConfigBits { - PM_Enable = 1 << 31, - PM_LongWF = 1 << 30, - PM_Magic = 1 << 29, - PM_LANWake = 1 << 28, - PM_LWPTN = (1 << 27 | 1<< 26), - PM_LinkUp = 1 << 25, - PM_WakeUp = 1 << 24, -}; - -/* Locking rules: - * priv->lock protects most of the fields of priv and most of the - * hardware registers. It does not have to protect against softirqs - * between sc92031_disable_interrupts and sc92031_enable_interrupts; - * it also does not need to be used in ->open and ->stop while the - * device interrupts are off. - * Not having to protect against softirqs is very useful due to heavy - * use of mdelay() at _sc92031_reset. - * Functions prefixed with _sc92031_ must be called with the lock held; - * functions prefixed with sc92031_ must be called without the lock held. - * Use mmiowb() before unlocking if the hardware was written to. - */ - -/* Locking rules for the interrupt: - * - the interrupt and the tasklet never run at the same time - * - neither run between sc92031_disable_interrupts and - * sc92031_enable_interrupt - */ - -struct sc92031_priv { - spinlock_t lock; - /* iomap.h cookie */ - void __iomem *port_base; - /* pci device structure */ - struct pci_dev *pdev; - /* tasklet */ - struct tasklet_struct tasklet; - - /* CPU address of rx ring */ - void *rx_ring; - /* PCI address of rx ring */ - dma_addr_t rx_ring_dma_addr; - /* PCI address of rx ring read pointer */ - dma_addr_t rx_ring_tail; - - /* tx ring write index */ - unsigned tx_head; - /* tx ring read index */ - unsigned tx_tail; - /* CPU address of tx bounce buffer */ - void *tx_bufs; - /* PCI address of tx bounce buffer */ - dma_addr_t tx_bufs_dma_addr; - - /* copies of some hardware registers */ - u32 intr_status; - atomic_t intr_mask; - u32 rx_config; - u32 tx_config; - u32 pm_config; - - /* copy of some flags from dev->flags */ - unsigned int mc_flags; - - /* for ETHTOOL_GSTATS */ - u64 tx_timeouts; - u64 rx_loss; - - /* for dev->get_stats */ - long rx_value; -}; - -/* I don't know which registers can be safely read; however, I can guess - * MAC0 is one of them. 
*/ -static inline void _sc92031_dummy_read(void __iomem *port_base) -{ - ioread32(port_base + MAC0); -} - -static u32 _sc92031_mii_wait(void __iomem *port_base) -{ - u32 mii_status; - - do { - udelay(10); - mii_status = ioread32(port_base + Miistatus); - } while (mii_status & Mii_StatusBusy); - - return mii_status; -} - -static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1) -{ - iowrite32(Mii_Divider, port_base + Miicmd0); - - _sc92031_mii_wait(port_base); - - iowrite32(cmd1, port_base + Miicmd1); - iowrite32(Mii_Divider | cmd0, port_base + Miicmd0); - - return _sc92031_mii_wait(port_base); -} - -static void _sc92031_mii_scan(void __iomem *port_base) -{ - _sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6); -} - -static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg) -{ - return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13; -} - -static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val) -{ - _sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11)); -} - -static void sc92031_disable_interrupts(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - - /* tell the tasklet/interrupt not to enable interrupts */ - atomic_set(&priv->intr_mask, 0); - wmb(); - - /* stop interrupts */ - iowrite32(0, port_base + IntrMask); - _sc92031_dummy_read(port_base); - mmiowb(); - - /* wait for any concurrent interrupt/tasklet to finish */ - synchronize_irq(dev->irq); - tasklet_disable(&priv->tasklet); -} - -static void sc92031_enable_interrupts(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - - tasklet_enable(&priv->tasklet); - - atomic_set(&priv->intr_mask, IntrBits); - wmb(); - - iowrite32(IntrBits, port_base + IntrMask); - mmiowb(); -} - -static void _sc92031_disable_tx_rx(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - - priv->rx_config &= ~RxEnb; - priv->tx_config &= ~TxEnb; - iowrite32(priv->rx_config, port_base + RxConfig); - iowrite32(priv->tx_config, port_base + TxConfig); -} - -static void _sc92031_enable_tx_rx(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - - priv->rx_config |= RxEnb; - priv->tx_config |= TxEnb; - iowrite32(priv->rx_config, port_base + RxConfig); - iowrite32(priv->tx_config, port_base + TxConfig); -} - -static void _sc92031_tx_clear(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - - while (priv->tx_head - priv->tx_tail > 0) { - priv->tx_tail++; - dev->stats.tx_dropped++; - } - priv->tx_head = priv->tx_tail = 0; -} - -static void _sc92031_set_mar(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u32 mar0 = 0, mar1 = 0; - - if ((dev->flags & IFF_PROMISC) || - netdev_mc_count(dev) > multicast_filter_limit || - (dev->flags & IFF_ALLMULTI)) - mar0 = mar1 = 0xffffffff; - else if (dev->flags & IFF_MULTICAST) { - struct netdev_hw_addr *ha; - - netdev_for_each_mc_addr(ha, dev) { - u32 crc; - unsigned bit = 0; - - crc = ~ether_crc(ETH_ALEN, ha->addr); - crc >>= 24; - - if (crc & 0x01) bit |= 0x02; - if (crc & 0x02) bit |= 0x01; - if (crc & 0x10) bit |= 0x20; - if (crc & 0x20) bit |= 0x10; - if (crc & 0x40) bit |= 0x08; - if (crc & 0x80) bit |= 0x04; - - if (bit > 31) - mar0 |= 0x1 << (bit - 32); - else - mar1 |= 0x1 << bit; - } - } - - 
iowrite32(mar0, port_base + MAR0); - iowrite32(mar1, port_base + MAR0 + 4); -} - -static void _sc92031_set_rx_config(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - unsigned int old_mc_flags; - u32 rx_config_bits = 0; - - old_mc_flags = priv->mc_flags; - - if (dev->flags & IFF_PROMISC) - rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast - | RxMulticast | RxAllphys; - - if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) - rx_config_bits |= RxMulticast; - - if (dev->flags & IFF_BROADCAST) - rx_config_bits |= RxBroadcast; - - priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast - | RxMulticast | RxAllphys); - priv->rx_config |= rx_config_bits; - - priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI - | IFF_MULTICAST | IFF_BROADCAST); - - if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags) - iowrite32(priv->rx_config, port_base + RxConfig); -} - -static bool _sc92031_check_media(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u16 bmsr; - - bmsr = _sc92031_mii_read(port_base, MII_BMSR); - rmb(); - if (bmsr & BMSR_LSTATUS) { - bool speed_100, duplex_full; - u32 flow_ctrl_config = 0; - u16 output_status = _sc92031_mii_read(port_base, - MII_OutputStatus); - _sc92031_mii_scan(port_base); - - speed_100 = output_status & 0x2; - duplex_full = output_status & 0x4; - - /* Initial Tx/Rx configuration */ - priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift); - priv->tx_config = 0x48800000; - - /* NOTE: vendor driver had dead code here to enable tx padding */ - - if (!speed_100) - priv->tx_config |= 0x80000; - - // configure rx mode - _sc92031_set_rx_config(dev); - - if (duplex_full) { - priv->rx_config |= RxFullDx; - priv->tx_config |= TxFullDx; - flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb; - } else { - priv->rx_config &= ~RxFullDx; - priv->tx_config &= ~TxFullDx; - } - - _sc92031_set_mar(dev); - _sc92031_set_rx_config(dev); - _sc92031_enable_tx_rx(dev); - iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig); - - netif_carrier_on(dev); - - if (printk_ratelimit()) - printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n", - dev->name, - speed_100 ? "100" : "10", - duplex_full ? 
"full" : "half"); - return true; - } else { - _sc92031_mii_scan(port_base); - - netif_carrier_off(dev); - - _sc92031_disable_tx_rx(dev); - - if (printk_ratelimit()) - printk(KERN_INFO "%s: link down\n", dev->name); - return false; - } -} - -static void _sc92031_phy_reset(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u32 phy_ctrl; - - phy_ctrl = ioread32(port_base + PhyCtrl); - phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10); - phy_ctrl |= PhyCtrlAne | PhyCtrlReset; - - switch (media) { - default: - case AUTOSELECT: - phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10; - break; - case M10_HALF: - phy_ctrl |= PhyCtrlSpd10; - break; - case M10_FULL: - phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10; - break; - case M100_HALF: - phy_ctrl |= PhyCtrlSpd100; - break; - case M100_FULL: - phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100; - break; - } - - iowrite32(phy_ctrl, port_base + PhyCtrl); - mdelay(10); - - phy_ctrl &= ~PhyCtrlReset; - iowrite32(phy_ctrl, port_base + PhyCtrl); - mdelay(1); - - _sc92031_mii_write(port_base, MII_JAB, - PHY_16_JAB_ENB | PHY_16_PORT_ENB); - _sc92031_mii_scan(port_base); - - netif_carrier_off(dev); - netif_stop_queue(dev); -} - -static void _sc92031_reset(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - - /* disable PM */ - iowrite32(0, port_base + PMConfig); - - /* soft reset the chip */ - iowrite32(Cfg0_Reset, port_base + Config0); - mdelay(200); - - iowrite32(0, port_base + Config0); - mdelay(10); - - /* disable interrupts */ - iowrite32(0, port_base + IntrMask); - - /* clear multicast address */ - iowrite32(0, port_base + MAR0); - iowrite32(0, port_base + MAR0 + 4); - - /* init rx ring */ - iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr); - priv->rx_ring_tail = priv->rx_ring_dma_addr; - - /* init tx ring */ - _sc92031_tx_clear(dev); - - /* clear old register values */ - priv->intr_status = 0; - atomic_set(&priv->intr_mask, 0); - priv->rx_config = 0; - priv->tx_config = 0; - priv->mc_flags = 0; - - /* configure rx buffer size */ - /* NOTE: vendor driver had dead code here to enable early tx/rx */ - iowrite32(Cfg1_Rcv64K, port_base + Config1); - - _sc92031_phy_reset(dev); - _sc92031_check_media(dev); - - /* calculate rx fifo overflow */ - priv->rx_value = 0; - - /* enable PM */ - iowrite32(priv->pm_config, port_base + PMConfig); - - /* clear intr register */ - ioread32(port_base + IntrStatus); -} - -static void _sc92031_tx_tasklet(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - - unsigned old_tx_tail; - unsigned entry; - u32 tx_status; - - old_tx_tail = priv->tx_tail; - while (priv->tx_head - priv->tx_tail > 0) { - entry = priv->tx_tail % NUM_TX_DESC; - tx_status = ioread32(port_base + TxStatus0 + entry * 4); - - if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted))) - break; - - priv->tx_tail++; - - if (tx_status & TxStatOK) { - dev->stats.tx_bytes += tx_status & 0x1fff; - dev->stats.tx_packets++; - /* Note: TxCarrierLost is always asserted at 100mbps. 
*/ - dev->stats.collisions += (tx_status >> 22) & 0xf; - } - - if (tx_status & (TxOutOfWindow | TxAborted)) { - dev->stats.tx_errors++; - - if (tx_status & TxAborted) - dev->stats.tx_aborted_errors++; - - if (tx_status & TxCarrierLost) - dev->stats.tx_carrier_errors++; - - if (tx_status & TxOutOfWindow) - dev->stats.tx_window_errors++; - } - - if (tx_status & TxUnderrun) - dev->stats.tx_fifo_errors++; - } - - if (priv->tx_tail != old_tx_tail) - if (netif_queue_stopped(dev)) - netif_wake_queue(dev); -} - -static void _sc92031_rx_tasklet_error(struct net_device *dev, - u32 rx_status, unsigned rx_size) -{ - if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) { - dev->stats.rx_errors++; - dev->stats.rx_length_errors++; - } - - if (!(rx_status & RxStatesOK)) { - dev->stats.rx_errors++; - - if (rx_status & (RxHugeFrame | RxSmallFrame)) - dev->stats.rx_length_errors++; - - if (rx_status & RxBadAlign) - dev->stats.rx_frame_errors++; - - if (!(rx_status & RxCRCOK)) - dev->stats.rx_crc_errors++; - } else { - struct sc92031_priv *priv = netdev_priv(dev); - priv->rx_loss++; - } -} - -static void _sc92031_rx_tasklet(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - - dma_addr_t rx_ring_head; - unsigned rx_len; - unsigned rx_ring_offset; - void *rx_ring = priv->rx_ring; - - rx_ring_head = ioread32(port_base + RxBufWPtr); - rmb(); - - /* rx_ring_head is only 17 bits in the RxBufWPtr register. - * we need to change it to 32 bits physical address - */ - rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1); - rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1); - if (rx_ring_head < priv->rx_ring_dma_addr) - rx_ring_head += RX_BUF_LEN; - - if (rx_ring_head >= priv->rx_ring_tail) - rx_len = rx_ring_head - priv->rx_ring_tail; - else - rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head); - - if (!rx_len) - return; - - if (unlikely(rx_len > RX_BUF_LEN)) { - if (printk_ratelimit()) - printk(KERN_ERR "%s: rx packets length > rx buffer\n", - dev->name); - return; - } - - rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN; - - while (rx_len) { - u32 rx_status; - unsigned rx_size, rx_size_align, pkt_size; - struct sk_buff *skb; - - rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset)); - rmb(); - - rx_size = rx_status >> 20; - rx_size_align = (rx_size + 3) & ~3; // for 4 bytes aligned - pkt_size = rx_size - 4; // Omit the four octet CRC from the length. 
- - rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN; - - if (unlikely(rx_status == 0 || - rx_size > (MAX_ETH_FRAME_SIZE + 4) || - rx_size < 16 || - !(rx_status & RxStatesOK))) { - _sc92031_rx_tasklet_error(dev, rx_status, rx_size); - break; - } - - if (unlikely(rx_size_align + 4 > rx_len)) { - if (printk_ratelimit()) - printk(KERN_ERR "%s: rx_len is too small\n", dev->name); - break; - } - - rx_len -= rx_size_align + 4; - - skb = netdev_alloc_skb_ip_align(dev, pkt_size); - if (unlikely(!skb)) { - if (printk_ratelimit()) - printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", - dev->name, pkt_size); - goto next; - } - - if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) { - memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset), - rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset); - memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)), - rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset)); - } else { - memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size); - } - - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - - dev->stats.rx_bytes += pkt_size; - dev->stats.rx_packets++; - - if (rx_status & Rx_Multicast) - dev->stats.multicast++; - - next: - rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN; - } - mb(); - - priv->rx_ring_tail = rx_ring_head; - iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr); -} - -static void _sc92031_link_tasklet(struct net_device *dev) -{ - if (_sc92031_check_media(dev)) - netif_wake_queue(dev); - else { - netif_stop_queue(dev); - dev->stats.tx_carrier_errors++; - } -} - -static void sc92031_tasklet(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u32 intr_status, intr_mask; - - intr_status = priv->intr_status; - - spin_lock(&priv->lock); - - if (unlikely(!netif_running(dev))) - goto out; - - if (intr_status & TxOK) - _sc92031_tx_tasklet(dev); - - if (intr_status & RxOK) - _sc92031_rx_tasklet(dev); - - if (intr_status & RxOverflow) - dev->stats.rx_errors++; - - if (intr_status & TimeOut) { - dev->stats.rx_errors++; - dev->stats.rx_length_errors++; - } - - if (intr_status & (LinkFail | LinkOK)) - _sc92031_link_tasklet(dev); - -out: - intr_mask = atomic_read(&priv->intr_mask); - rmb(); - - iowrite32(intr_mask, port_base + IntrMask); - mmiowb(); - - spin_unlock(&priv->lock); -} - -static irqreturn_t sc92031_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u32 intr_status, intr_mask; - - /* mask interrupts before clearing IntrStatus */ - iowrite32(0, port_base + IntrMask); - _sc92031_dummy_read(port_base); - - intr_status = ioread32(port_base + IntrStatus); - if (unlikely(intr_status == 0xffffffff)) - return IRQ_NONE; // hardware has gone missing - - intr_status &= IntrBits; - if (!intr_status) - goto out_none; - - priv->intr_status = intr_status; - tasklet_schedule(&priv->tasklet); - - return IRQ_HANDLED; - -out_none: - intr_mask = atomic_read(&priv->intr_mask); - rmb(); - - iowrite32(intr_mask, port_base + IntrMask); - mmiowb(); - - return IRQ_NONE; -} - -static struct net_device_stats *sc92031_get_stats(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - - // FIXME I do not understand what is this trying to do. 
- if (netif_running(dev)) { - int temp; - - spin_lock_bh(&priv->lock); - - /* Update the error count. */ - temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff; - - if (temp == 0xffff) { - priv->rx_value += temp; - dev->stats.rx_fifo_errors = priv->rx_value; - } else - dev->stats.rx_fifo_errors = temp + priv->rx_value; - - spin_unlock_bh(&priv->lock); - } - - return &dev->stats; -} - -static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - unsigned len; - unsigned entry; - u32 tx_status; - - if (unlikely(skb->len > TX_BUF_SIZE)) { - dev->stats.tx_dropped++; - goto out; - } - - spin_lock(&priv->lock); - - if (unlikely(!netif_carrier_ok(dev))) { - dev->stats.tx_dropped++; - goto out_unlock; - } - - BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC); - - entry = priv->tx_head++ % NUM_TX_DESC; - - skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); - - len = skb->len; - if (len < ETH_ZLEN) { - memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, - 0, ETH_ZLEN - len); - len = ETH_ZLEN; - } - - wmb(); - - if (len < 100) - tx_status = len; - else if (len < 300) - tx_status = 0x30000 | len; - else - tx_status = 0x50000 | len; - - iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE, - port_base + TxAddr0 + entry * 4); - iowrite32(tx_status, port_base + TxStatus0 + entry * 4); - mmiowb(); - - if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC) - netif_stop_queue(dev); - -out_unlock: - spin_unlock(&priv->lock); - -out: - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} - -static int sc92031_open(struct net_device *dev) -{ - int err; - struct sc92031_priv *priv = netdev_priv(dev); - struct pci_dev *pdev = priv->pdev; - - priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN, - &priv->rx_ring_dma_addr); - if (unlikely(!priv->rx_ring)) { - err = -ENOMEM; - goto out_alloc_rx_ring; - } - - priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN, - &priv->tx_bufs_dma_addr); - if (unlikely(!priv->tx_bufs)) { - err = -ENOMEM; - goto out_alloc_tx_bufs; - } - priv->tx_head = priv->tx_tail = 0; - - err = request_irq(pdev->irq, sc92031_interrupt, - IRQF_SHARED, dev->name, dev); - if (unlikely(err < 0)) - goto out_request_irq; - - priv->pm_config = 0; - - /* Interrupts already disabled by sc92031_stop or sc92031_probe */ - spin_lock_bh(&priv->lock); - - _sc92031_reset(dev); - mmiowb(); - - spin_unlock_bh(&priv->lock); - sc92031_enable_interrupts(dev); - - if (netif_carrier_ok(dev)) - netif_start_queue(dev); - else - netif_tx_disable(dev); - - return 0; - -out_request_irq: - pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs, - priv->tx_bufs_dma_addr); -out_alloc_tx_bufs: - pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring, - priv->rx_ring_dma_addr); -out_alloc_rx_ring: - return err; -} - -static int sc92031_stop(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - struct pci_dev *pdev = priv->pdev; - - netif_tx_disable(dev); - - /* Disable interrupts, stop Tx and Rx. 
*/ - sc92031_disable_interrupts(dev); - - spin_lock_bh(&priv->lock); - - _sc92031_disable_tx_rx(dev); - _sc92031_tx_clear(dev); - mmiowb(); - - spin_unlock_bh(&priv->lock); - - free_irq(pdev->irq, dev); - pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs, - priv->tx_bufs_dma_addr); - pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring, - priv->rx_ring_dma_addr); - - return 0; -} - -static void sc92031_set_multicast_list(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - - spin_lock_bh(&priv->lock); - - _sc92031_set_mar(dev); - _sc92031_set_rx_config(dev); - mmiowb(); - - spin_unlock_bh(&priv->lock); -} - -static void sc92031_tx_timeout(struct net_device *dev) -{ - struct sc92031_priv *priv = netdev_priv(dev); - - /* Disable interrupts by clearing the interrupt mask.*/ - sc92031_disable_interrupts(dev); - - spin_lock(&priv->lock); - - priv->tx_timeouts++; - - _sc92031_reset(dev); - mmiowb(); - - spin_unlock(&priv->lock); - - /* enable interrupts */ - sc92031_enable_interrupts(dev); - - if (netif_carrier_ok(dev)) - netif_wake_queue(dev); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void sc92031_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE) - sc92031_tasklet((unsigned long)dev); - enable_irq(dev->irq); -} -#endif - -static int sc92031_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u8 phy_address; - u32 phy_ctrl; - u16 output_status; - - spin_lock_bh(&priv->lock); - - phy_address = ioread32(port_base + Miicmd1) >> 27; - phy_ctrl = ioread32(port_base + PhyCtrl); - - output_status = _sc92031_mii_read(port_base, MII_OutputStatus); - _sc92031_mii_scan(port_base); - mmiowb(); - - spin_unlock_bh(&priv->lock); - - cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII; - - cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; - - if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) - == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) - cmd->advertising |= ADVERTISED_Autoneg; - - if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10) - cmd->advertising |= ADVERTISED_10baseT_Half; - - if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux)) - == (PhyCtrlSpd10 | PhyCtrlDux)) - cmd->advertising |= ADVERTISED_10baseT_Full; - - if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100) - cmd->advertising |= ADVERTISED_100baseT_Half; - - if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux)) - == (PhyCtrlSpd100 | PhyCtrlDux)) - cmd->advertising |= ADVERTISED_100baseT_Full; - - if (phy_ctrl & PhyCtrlAne) - cmd->advertising |= ADVERTISED_Autoneg; - - ethtool_cmd_speed_set(cmd, - (output_status & 0x2) ? SPEED_100 : SPEED_10); - cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF; - cmd->port = PORT_MII; - cmd->phy_address = phy_address; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; - - return 0; -} - -static int sc92031_ethtool_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u32 speed = ethtool_cmd_speed(cmd); - u32 phy_ctrl; - u32 old_phy_ctrl; - - if (!(speed == SPEED_10 || speed == SPEED_100)) - return -EINVAL; - if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL)) - return -EINVAL; - if (!(cmd->port == PORT_MII)) - return -EINVAL; - if (!(cmd->phy_address == 0x1f)) - return -EINVAL; - if (!(cmd->transceiver == XCVR_INTERNAL)) - return -EINVAL; - if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE)) - return -EINVAL; - - if (cmd->autoneg == AUTONEG_ENABLE) { - if (!(cmd->advertising & (ADVERTISED_Autoneg - | ADVERTISED_100baseT_Full - | ADVERTISED_100baseT_Half - | ADVERTISED_10baseT_Full - | ADVERTISED_10baseT_Half))) - return -EINVAL; - - phy_ctrl = PhyCtrlAne; - - // FIXME: I'm not sure what the original code was trying to do - if (cmd->advertising & ADVERTISED_Autoneg) - phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10; - if (cmd->advertising & ADVERTISED_100baseT_Full) - phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100; - if (cmd->advertising & ADVERTISED_100baseT_Half) - phy_ctrl |= PhyCtrlSpd100; - if (cmd->advertising & ADVERTISED_10baseT_Full) - phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux; - if (cmd->advertising & ADVERTISED_10baseT_Half) - phy_ctrl |= PhyCtrlSpd10; - } else { - // FIXME: Whole branch guessed - phy_ctrl = 0; - - if (speed == SPEED_10) - phy_ctrl |= PhyCtrlSpd10; - else /* cmd->speed == SPEED_100 */ - phy_ctrl |= PhyCtrlSpd100; - - if (cmd->duplex == DUPLEX_FULL) - phy_ctrl |= PhyCtrlDux; - } - - spin_lock_bh(&priv->lock); - - old_phy_ctrl = ioread32(port_base + PhyCtrl); - phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux - | PhyCtrlSpd100 | PhyCtrlSpd10); - if (phy_ctrl != old_phy_ctrl) - iowrite32(phy_ctrl, port_base + PhyCtrl); - - spin_unlock_bh(&priv->lock); - - return 0; -} - -static void sc92031_ethtool_get_wol(struct net_device *dev, - struct ethtool_wolinfo *wolinfo) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u32 pm_config; - - spin_lock_bh(&priv->lock); - pm_config = ioread32(port_base + PMConfig); - spin_unlock_bh(&priv->lock); - - // FIXME: Guessed - wolinfo->supported = WAKE_PHY | WAKE_MAGIC - | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; - wolinfo->wolopts = 0; - - if (pm_config & PM_LinkUp) - wolinfo->wolopts |= WAKE_PHY; - - if (pm_config & PM_Magic) - wolinfo->wolopts |= WAKE_MAGIC; - - if (pm_config & PM_WakeUp) - // FIXME: Guessed - wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; -} - -static int sc92031_ethtool_set_wol(struct net_device *dev, - struct ethtool_wolinfo *wolinfo) -{ - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u32 pm_config; - - spin_lock_bh(&priv->lock); - - pm_config = ioread32(port_base + PMConfig) - & ~(PM_LinkUp | PM_Magic | PM_WakeUp); - - if (wolinfo->wolopts & WAKE_PHY) - pm_config |= PM_LinkUp; - - if (wolinfo->wolopts & WAKE_MAGIC) - pm_config |= PM_Magic; - - // FIXME: Guessed - if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) - pm_config |= PM_WakeUp; - - priv->pm_config = pm_config; - iowrite32(pm_config, port_base + PMConfig); - mmiowb(); - - spin_unlock_bh(&priv->lock); - - return 0; -} - -static int sc92031_ethtool_nway_reset(struct net_device *dev) -{ - int err = 0; - struct sc92031_priv *priv = 
netdev_priv(dev); - void __iomem *port_base = priv->port_base; - u16 bmcr; - - spin_lock_bh(&priv->lock); - - bmcr = _sc92031_mii_read(port_base, MII_BMCR); - if (!(bmcr & BMCR_ANENABLE)) { - err = -EINVAL; - goto out; - } - - _sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART); - -out: - _sc92031_mii_scan(port_base); - mmiowb(); - - spin_unlock_bh(&priv->lock); - - return err; -} - -static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = { - "tx_timeout", - "rx_loss", -}; - -static void sc92031_ethtool_get_strings(struct net_device *dev, - u32 stringset, u8 *data) -{ - if (stringset == ETH_SS_STATS) - memcpy(data, sc92031_ethtool_stats_strings, - SILAN_STATS_NUM * ETH_GSTRING_LEN); -} - -static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return SILAN_STATS_NUM; - default: - return -EOPNOTSUPP; - } -} - -static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct sc92031_priv *priv = netdev_priv(dev); - - spin_lock_bh(&priv->lock); - data[0] = priv->tx_timeouts; - data[1] = priv->rx_loss; - spin_unlock_bh(&priv->lock); -} - -static const struct ethtool_ops sc92031_ethtool_ops = { - .get_settings = sc92031_ethtool_get_settings, - .set_settings = sc92031_ethtool_set_settings, - .get_wol = sc92031_ethtool_get_wol, - .set_wol = sc92031_ethtool_set_wol, - .nway_reset = sc92031_ethtool_nway_reset, - .get_link = ethtool_op_get_link, - .get_strings = sc92031_ethtool_get_strings, - .get_sset_count = sc92031_ethtool_get_sset_count, - .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats, -}; - - -static const struct net_device_ops sc92031_netdev_ops = { - .ndo_get_stats = sc92031_get_stats, - .ndo_start_xmit = sc92031_start_xmit, - .ndo_open = sc92031_open, - .ndo_stop = sc92031_stop, - .ndo_set_rx_mode = sc92031_set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_tx_timeout = sc92031_tx_timeout, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = sc92031_poll_controller, -#endif -}; - -static int __devinit sc92031_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - int err; - void __iomem* port_base; - struct net_device *dev; - struct sc92031_priv *priv; - u32 mac0, mac1; - unsigned long base_addr; - - err = pci_enable_device(pdev); - if (unlikely(err < 0)) - goto out_enable_device; - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (unlikely(err < 0)) - goto out_set_dma_mask; - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (unlikely(err < 0)) - goto out_set_dma_mask; - - err = pci_request_regions(pdev, SC92031_NAME); - if (unlikely(err < 0)) - goto out_request_regions; - - port_base = pci_iomap(pdev, SC92031_USE_BAR, 0); - if (unlikely(!port_base)) { - err = -EIO; - goto out_iomap; - } - - dev = alloc_etherdev(sizeof(struct sc92031_priv)); - if (unlikely(!dev)) { - err = -ENOMEM; - goto out_alloc_etherdev; - } - - pci_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - -#if SC92031_USE_BAR == 0 - dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR); - dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR); -#elif SC92031_USE_BAR == 1 - dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR); -#endif - dev->irq = pdev->irq; - - /* faked with skb_copy_and_csum_dev */ - dev->features = NETIF_F_SG | NETIF_F_HIGHDMA | - NETIF_F_IP_CSUM | 
NETIF_F_IPV6_CSUM; - - dev->netdev_ops = &sc92031_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - dev->ethtool_ops = &sc92031_ethtool_ops; - - priv = netdev_priv(dev); - spin_lock_init(&priv->lock); - priv->port_base = port_base; - priv->pdev = pdev; - tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev); - /* Fudge tasklet count so the call to sc92031_enable_interrupts at - * sc92031_open will work correctly */ - tasklet_disable_nosync(&priv->tasklet); - - /* PCI PM Wakeup */ - iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig); - - mac0 = ioread32(port_base + MAC0); - mac1 = ioread32(port_base + MAC0 + 4); - dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24; - dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16; - dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8; - dev->dev_addr[3] = dev->perm_addr[3] = mac0; - dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8; - dev->dev_addr[5] = dev->perm_addr[5] = mac1; - - err = register_netdev(dev); - if (err < 0) - goto out_register_netdev; - -#if SC92031_USE_BAR == 0 - base_addr = dev->mem_start; -#elif SC92031_USE_BAR == 1 - base_addr = dev->base_addr; -#endif - printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name, - base_addr, dev->dev_addr, dev->irq); - - return 0; - -out_register_netdev: - free_netdev(dev); -out_alloc_etherdev: - pci_iounmap(pdev, port_base); -out_iomap: - pci_release_regions(pdev); -out_request_regions: -out_set_dma_mask: - pci_disable_device(pdev); -out_enable_device: - return err; -} - -static void __devexit sc92031_remove(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct sc92031_priv *priv = netdev_priv(dev); - void __iomem* port_base = priv->port_base; - - unregister_netdev(dev); - free_netdev(dev); - pci_iounmap(pdev, port_base); - pci_release_regions(pdev); - pci_disable_device(pdev); -} - -static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct sc92031_priv *priv = netdev_priv(dev); - - pci_save_state(pdev); - - if (!netif_running(dev)) - goto out; - - netif_device_detach(dev); - - /* Disable interrupts, stop Tx and Rx. 
*/ - sc92031_disable_interrupts(dev); - - spin_lock_bh(&priv->lock); - - _sc92031_disable_tx_rx(dev); - _sc92031_tx_clear(dev); - mmiowb(); - - spin_unlock_bh(&priv->lock); - -out: - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static int sc92031_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct sc92031_priv *priv = netdev_priv(dev); - - pci_restore_state(pdev); - pci_set_power_state(pdev, PCI_D0); - - if (!netif_running(dev)) - goto out; - - /* Interrupts already disabled by sc92031_suspend */ - spin_lock_bh(&priv->lock); - - _sc92031_reset(dev); - mmiowb(); - - spin_unlock_bh(&priv->lock); - sc92031_enable_interrupts(dev); - - netif_device_attach(dev); - - if (netif_carrier_ok(dev)) - netif_wake_queue(dev); - else - netif_tx_disable(dev); - -out: - return 0; -} - -static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = { - { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, - { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, - { PCI_DEVICE(0x1088, 0x2031) }, - { 0, } -}; -MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); - -static struct pci_driver sc92031_pci_driver = { - .name = SC92031_NAME, - .id_table = sc92031_pci_device_id_table, - .probe = sc92031_probe, - .remove = __devexit_p(sc92031_remove), - .suspend = sc92031_suspend, - .resume = sc92031_resume, -}; - -static int __init sc92031_init(void) -{ - return pci_register_driver(&sc92031_pci_driver); -} - -static void __exit sc92031_exit(void) -{ - pci_unregister_driver(&sc92031_pci_driver); -} - -module_init(sc92031_init); -module_exit(sc92031_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>"); -MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver"); |
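For reference, the multicast hashing that the rtl_set_rx_mode() hunk above relies on (ether_crc(ETH_ALEN, ha->addr) >> 26 selecting one bit of the 64-bit MAR0/MAR0+4 filter pair) can be reproduced outside the kernel. The sketch below is a minimal stand-alone version, assuming ether_crc() behaves like the classic MSB-first CRC-32 register over the Ethernet polynomial 0x04c11db7 with an all-ones seed; the sample multicast address is hypothetical and only used to exercise the computation.

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Assumed equivalent of the kernel's ether_crc(): MSB-first CRC-32
 * register, Ethernet polynomial 0x04c11db7, all-ones seed, each octet
 * fed low bit first, no final inversion. */
static uint32_t ether_crc(int length, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (--length >= 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1) {
			uint32_t carry = ((crc >> 31) ^ octet) & 1;

			crc = (crc << 1) ^ (carry ? 0x04c11db7 : 0);
		}
	}
	return crc;
}

int main(void)
{
	/* Hypothetical multicast address (IPv4 all-hosts, 224.0.0.1). */
	const uint8_t addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[2] = { 0, 0 };

	/* Top 6 CRC bits pick one of 64 filter bits, as in rtl_set_rx_mode(). */
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);

	printf("hash bit %d -> mc_filter[%d] = 0x%08x\n",
	       bit_nr, bit_nr >> 5, mc_filter[bit_nr >> 5]);
	return 0;
}

Note that, as the hunk shows, chips newer than RTL_GIGA_MAC_VER_06 additionally swap and byte-swap the two filter words (swab32) before they are written to MAR0, so the bit computed here ends up in the other register half on those parts.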
