Diffstat (limited to 'drivers/net/ethernet/intel/e1000e/netdev.c')
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 1745
1 file changed, 1216 insertions(+), 529 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fbf75fdca99..201cc93f362 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,30 +1,23 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -42,7 +35,6 @@ #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> -#include <linux/mii.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/cpu.h> @@ -56,7 +48,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION +#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -65,8 +57,6 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); - static const struct e1000_info *e1000_info_tbl[] = { [board_82571] = &e1000_82571_info, [board_82572] = &e1000_82572_info, @@ -87,20 +77,7 @@ struct e1000_reg_info { char *name; }; -#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ -#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ -#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ -#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ -#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ - -#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ -#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ -#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ -#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ -#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ - static const struct e1000_reg_info e1000_reg_info_tbl[] = { - /* General Registers */ {E1000_CTRL, "CTRL"}, {E1000_STATUS, "STATUS"}, @@ -147,6 +124,36 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { }; /** + * __ew32_prepare - prepare to write to MAC CSR register on certain parts + * @hw: pointer to the HW structure + * + * When updating the MAC CSR registers, the Manageability Engine (ME) could + * be accessing the registers at the same time. Normally, this is handled in + * h/w by an arbiter but on some parts there is a bug that acknowledges Host + * accesses later than it should which could result in the register to have + * an incorrect value. Workaround this by checking the FWSM register which + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times. 
+ **/ +s32 __ew32_prepare(struct e1000_hw *hw) +{ + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); + + return i; +} + +void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + __ew32_prepare(hw); + + writel(val, hw->hw_addr + reg); +} + +/** * e1000_regdump - register printout routine * @hw: pointer to the HW structure * @reginfo: pointer to the register info table @@ -233,9 +240,8 @@ static void e1000e_dump(struct e1000_adapter *adapter) if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", - netdev->name, netdev->state, netdev->trans_start, - netdev->last_rx); + pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, + netdev->state, netdev->trans_start, netdev->last_rx); } /* Print Registers */ @@ -488,21 +494,88 @@ static int e1000_desc_unused(struct e1000_ring *ring) } /** + * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp + * @adapter: board private structure + * @hwtstamps: time stamp structure to update + * @systim: unsigned 64bit system time value. + * + * Convert the system time value stored in the RX/TXSTMP registers into a + * hwtstamp which can be used by the upper level time stamping functions. + * + * The 'systim_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two 32 bit registers. The first read latches the + * value. + **/ +static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + u64 ns; + unsigned long flags; + + spin_lock_irqsave(&adapter->systim_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, systim); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +/** + * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp + * @adapter: board private structure + * @status: descriptor extended error and status field + * @skb: particular skb to include time stamp + * + * If the time stamp is valid, convert it into the timecounter ns value + * and store that result into the shhwtstamps structure which is passed + * up the network stack. + **/ +static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u64 rxstmp; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || + !(status & E1000_RXDEXT_STATERR_TST) || + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + /* The Rx time stamp registers contain the time stamp. No other + * received packet will be time stamped until the Rx time stamp + * registers are read. Because only one packet can be time stamped + * at a time, the register values must belong to this packet and + * therefore none of the other additional attributes need to be + * compared. 
+ */ + rxstmp = (u64)er32(RXSTMPL); + rxstmp |= (u64)er32(RXSTMPH) << 32; + e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp); + + adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; +} + +/** * e1000_receive_skb - helper function to handle Rx indications * @adapter: board private structure - * @status: descriptor status field as written by hardware + * @staterr: descriptor extended error and status field as written by hardware * @vlan: descriptor vlan field as written by hardware (no le/be conversion) * @skb: pointer to sk_buff to be indicated to stack **/ static void e1000_receive_skb(struct e1000_adapter *adapter, struct net_device *netdev, struct sk_buff *skb, - u8 status, __le16 vlan) + u32 staterr, __le16 vlan) { u16 tag = le16_to_cpu(vlan); + + e1000e_rx_hwtstamp(adapter, staterr, skb); + skb->protocol = eth_type_trans(skb, netdev); - if (status & E1000_RXD_STAT_VP) - __vlan_hwaccel_put_tag(skb, tag); + if (staterr & E1000_RXD_STAT_VP) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); napi_gro_receive(&adapter->napi, skb); } @@ -556,6 +629,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); e_err("ME firmware caused invalid RDT - resetting\n"); schedule_work(&adapter->reset_task); @@ -572,6 +646,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { u32 tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); e_err("ME firmware caused invalid TDT - resetting\n"); schedule_work(&adapter->reset_task); @@ -702,8 +777,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, cpu_to_le64(ps_page->dma); } - skb = __netdev_alloc_skb_ip_align(netdev, - adapter->rx_ps_bsize0, + skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, gfp); if (!skb) { @@ -765,7 +839,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; - unsigned int bufsz = 256 - 16 /* for skb_reserve */; + unsigned int bufsz = 256 - 16; /* for skb_reserve */ i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; @@ -795,11 +869,16 @@ check_page: } } - if (!buffer_info->dma) + if (!buffer_info->dma) { buffer_info->dma = dma_map_page(&pdev->dev, - buffer_info->page, 0, - PAGE_SIZE, + buffer_info->page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + adapter->alloc_rx_buff_failed++; + break; + } + } rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); @@ -831,7 +910,7 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, struct sk_buff *skb) { if (netdev->features & NETIF_F_RXHASH) - skb->rxhash = le32_to_cpu(rss); + skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3); } /** @@ -884,10 +963,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, cleaned = true; cleaned_count++; - dma_unmap_single(&pdev->dev, - buffer_info->dma, - adapter->rx_buffer_len, - DMA_FROM_DEVICE); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; length = le16_to_cpu(rx_desc->wb.upper.length); @@ -1015,8 +1092,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring, static void e1000_print_hw_hang(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct 
e1000_adapter, - print_hang_task); + struct e1000_adapter, + print_hang_task); struct net_device *netdev = adapter->netdev; struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int i = tx_ring->next_to_clean; @@ -1029,8 +1106,7 @@ static void e1000_print_hw_hang(struct work_struct *work) if (test_bit(__E1000_DOWN, &adapter->state)) return; - if (!adapter->tx_hang_recheck && - (adapter->flags2 & FLAG2_DMA_BURST)) { + if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { /* May be block on write-back, flush and detect again * flush pending descriptor writebacks to memory */ @@ -1046,13 +1122,19 @@ static void e1000_print_hw_hang(struct work_struct *work) adapter->tx_hang_recheck = true; return; } - /* Real hang detected */ adapter->tx_hang_recheck = false; + + if (er32(TDH(0)) == er32(TDT(0))) { + e_dbg("false hang detected, ignoring\n"); + return; + } + + /* Real hang detected */ netif_stop_queue(netdev); - e1e_rphy(hw, PHY_STATUS, &phy_status); - e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); - e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); + e1e_rphy(hw, MII_BMSR, &phy_status); + e1e_rphy(hw, MII_STAT1000, &phy_1000t_status); + e1e_rphy(hw, MII_ESTATUS, &phy_ext_status); pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); @@ -1072,19 +1154,12 @@ static void e1000_print_hw_hang(struct work_struct *work) "PHY 1000BASE-T Status <%x>\n" "PHY Extended Status <%x>\n" "PCI Status <%x>\n", - readl(tx_ring->head), - readl(tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, - jiffies, - eop_desc->upper.fields.status, - er32(STATUS), - phy_status, - phy_1000t_status, - phy_ext_status, - pci_status); + readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, + tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, + eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), + phy_status, phy_1000t_status, phy_ext_status, pci_status); + + e1000e_dump(adapter); /* Suggest workaround for known h/w issue */ if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) @@ -1092,6 +1167,44 @@ static void e1000_print_hw_hang(struct work_struct *work) } /** + * e1000e_tx_hwtstamp_work - check for Tx time stamp + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. The timestamp must + * be for this skb because only one such packet is allowed in the queue. 
+ */ +static void e1000e_tx_hwtstamp_work(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, + tx_hwtstamp_work); + struct e1000_hw *hw = &adapter->hw; + + if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { + struct skb_shared_hwtstamps shhwtstamps; + u64 txstmp; + + txstmp = er32(TXSTMPL); + txstmp |= (u64)er32(TXSTMPH) << 32; + + e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); + + skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + } else if (time_after(jiffies, adapter->tx_hwtstamp_start + + adapter->tx_timeout_factor * HZ)) { + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + e_warn("clearing Tx timestamp hang\n"); + } else { + /* reschedule to check later */ + schedule_work(&adapter->tx_hwtstamp_work); + } +} + +/** * e1000_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: Tx descriptor ring * @@ -1117,7 +1230,8 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ + + rmb(); /* read buffer_info after eop_desc */ for (; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -1306,7 +1420,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, skb_put(skb, l1); goto copydone; - } /* if */ + } /* if */ } for (j = 0; j < PS_PAGE_BUFFERS; j++) { @@ -1342,11 +1456,11 @@ copydone: e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); if (rx_desc->wb.upper.header_status & - cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) adapter->rx_hdr_split++; - e1000_receive_skb(adapter, netdev, skb, - staterr, rx_desc->wb.middle.vlan); + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.middle.vlan); next_desc: rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); @@ -1380,7 +1494,7 @@ next_desc: * e1000_consume_page - helper function **/ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, - u16 length) + u16 length) { bi->page = NULL; skb->len += length; @@ -1407,7 +1521,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, unsigned int i; int cleaned_count = 0; bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct skb_shared_info *shinfo; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); @@ -1453,7 +1568,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, rx_ring->rx_skb_top = NULL; goto next_desc; } - #define rxtop (rx_ring->rx_skb_top) if (!(staterr & E1000_RXD_STAT_EOP)) { /* this descriptor is only the beginning (or middle) */ @@ -1461,12 +1575,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, /* this is the beginning of a chain */ rxtop = skb; skb_fill_page_desc(rxtop, 0, buffer_info->page, - 0, length); + 0, length); } else { /* this is the middle of a chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); /* re-use the skb, only consumed the page */ buffer_info->skb = skb; } @@ -1475,9 
+1590,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, } else { if (rxtop) { /* end of the chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); /* re-use the current skb, we only consumed the * page */ @@ -1502,10 +1618,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, skb_put(skb, length); } else { skb_fill_page_desc(skb, 0, - buffer_info->page, 0, - length); + buffer_info->page, 0, + length); e1000_consume_page(buffer_info, skb, - length); + length); } } } @@ -1578,8 +1694,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) DMA_FROM_DEVICE); else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) dma_unmap_page(&pdev->dev, buffer_info->dma, - PAGE_SIZE, - DMA_FROM_DEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); else if (adapter->clean_rx == e1000_clean_rx_irq_ps) dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_bsize0, @@ -1623,7 +1738,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) adapter->flags2 &= ~FLAG2_IS_DISCARDING; writel(0, rx_ring->head); - if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_rdt_wa(rx_ring, 0); else writel(0, rx_ring->tail); @@ -1632,7 +1747,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) static void e1000e_downshift_workaround(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, downshift_task); + struct e1000_adapter, + downshift_task); if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -1645,7 +1761,7 @@ static void e1000e_downshift_workaround(struct work_struct *work) * @irq: interrupt number * @data: pointer to a network interface device structure **/ -static irqreturn_t e1000_intr_msi(int irq, void *data) +static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1670,14 +1786,32 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) adapter->flags & FLAG_RX_NEEDS_RESTART) { /* disable receives */ u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); - adapter->flags |= FLAG_RX_RESTART_NOW; + adapter->flags |= FLAG_RESTART_NOW; } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } + /* Reset on uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since reset is imminent */ + return IRQ_HANDLED; + } + if (napi_schedule_prep(&adapter->napi)) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; @@ -1694,7 +1828,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) * @irq: interrupt number * @data: pointer to a network interface device structure **/ -static irqreturn_t e1000_intr(int irq, void *data) +static irqreturn_t e1000_intr(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = 
netdev_priv(netdev); @@ -1702,7 +1836,7 @@ static irqreturn_t e1000_intr(int irq, void *data) u32 rctl, icr = er32(ICR); if (!icr || test_bit(__E1000_DOWN, &adapter->state)) - return IRQ_NONE; /* Not our interrupt */ + return IRQ_NONE; /* Not our interrupt */ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt @@ -1734,13 +1868,30 @@ static irqreturn_t e1000_intr(int irq, void *data) /* disable receives */ rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); - adapter->flags |= FLAG_RX_RESTART_NOW; + adapter->flags |= FLAG_RESTART_NOW; } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } + /* Reset on uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since reset is imminent */ + return IRQ_HANDLED; + } + if (napi_schedule_prep(&adapter->napi)) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; @@ -1752,7 +1903,7 @@ static irqreturn_t e1000_intr(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t e1000_msix_other(int irq, void *data) +static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1784,15 +1935,13 @@ no_link_interrupt: return IRQ_HANDLED; } - -static irqreturn_t e1000_intr_msix_tx(int irq, void *data) +static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; - adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; @@ -1803,7 +1952,7 @@ static irqreturn_t e1000_intr_msix_tx(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t e1000_intr_msix_rx(int irq, void *data) +static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1845,11 +1994,11 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ if (hw->mac.type == e1000_82574) { u32 rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_ACK_DIS; ew32(RFCTL, rfctl); } -#define E1000_IVAR_INT_ALLOC_VALID 0x8 /* Configure Rx vector */ rx_ring->ims_val = E1000_IMS_RXQ0; adapter->eiac_mask |= rx_ring->ims_val; @@ -1890,7 +2039,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; /* Auto-Mask Other interrupts upon ICR read */ -#define E1000_EIAC_MASK_82574 0x01F00000 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); ctrl_ext |= E1000_CTRL_EXT_EIAME; ew32(CTRL_EXT, ctrl_ext); @@ -1925,16 +2073,20 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) if (adapter->flags & FLAG_HAS_MSIX) { adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ adapter->msix_entries = kcalloc(adapter->num_vectors, - sizeof(struct msix_entry), - GFP_KERNEL); + sizeof(struct + msix_entry), + GFP_KERNEL); if (adapter->msix_entries) { + 
struct e1000_adapter *a = adapter; + for (i = 0; i < adapter->num_vectors; i++) adapter->msix_entries[i].entry = i; - err = pci_enable_msix(adapter->pdev, - adapter->msix_entries, - adapter->num_vectors); - if (err == 0) + err = pci_enable_msix_range(a->pdev, + a->msix_entries, + a->num_vectors, + a->num_vectors); + if (err > 0) return; } /* MSI-X failed, so fall through and try MSI */ @@ -2087,6 +2239,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) if (adapter->msix_entries) { int i; + for (i = 0; i < adapter->num_vectors; i++) synchronize_irq(adapter->msix_entries[i].vector); } else { @@ -2104,6 +2257,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + } else if (hw->mac.type == e1000_pch_lpt) { + ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { ew32(IMS, IMS_ENABLE_MASK); } @@ -2290,7 +2445,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) tx_ring->next_to_clean = 0; writel(0, tx_ring->head); - if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_tdt_wa(tx_ring, 0); else writel(0, tx_ring->tail); @@ -2358,9 +2513,7 @@ void e1000e_free_rx_resources(struct e1000_ring *rx_ring) * while increasing bulk throughput. This functionality is controlled * by the InterruptThrottleRate module parameter. **/ -static unsigned int e1000_update_itr(struct e1000_adapter *adapter, - u16 itr_setting, int packets, - int bytes) +static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) { unsigned int retval = itr_setting; @@ -2370,27 +2523,27 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter, switch (itr_setting) { case lowest_latency: /* handle TSO and jumbo frames */ - if (bytes/packets > 8000) + if (bytes / packets > 8000) retval = bulk_latency; else if ((packets < 5) && (bytes > 512)) retval = low_latency; break; - case low_latency: /* 50 usec aka 20000 ints/s */ + case low_latency: /* 50 usec aka 20000 ints/s */ if (bytes > 10000) { /* this if handles the TSO accounting */ - if (bytes/packets > 8000) + if (bytes / packets > 8000) retval = bulk_latency; - else if ((packets < 10) || ((bytes/packets) > 1200)) + else if ((packets < 10) || ((bytes / packets) > 1200)) retval = bulk_latency; else if ((packets > 35)) retval = lowest_latency; - } else if (bytes/packets > 2000) { + } else if (bytes / packets > 2000) { retval = bulk_latency; } else if (packets <= 2 && bytes < 512) { retval = lowest_latency; } break; - case bulk_latency: /* 250 usec aka 4000 ints/s */ + case bulk_latency: /* 250 usec aka 4000 ints/s */ if (bytes > 25000) { if (packets > 35) retval = low_latency; @@ -2405,7 +2558,6 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter, static void e1000_set_itr(struct e1000_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; u16 current_itr; u32 new_itr = adapter->itr; @@ -2421,31 +2573,29 @@ static void e1000_set_itr(struct e1000_adapter *adapter) goto set_itr_now; } - adapter->tx_itr = e1000_update_itr(adapter, - adapter->tx_itr, - adapter->total_tx_packets, - adapter->total_tx_bytes); + adapter->tx_itr = e1000_update_itr(adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); /* conservative mode (itr 3) eliminates the lowest_latency setting */ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) adapter->tx_itr = low_latency; - 
adapter->rx_itr = e1000_update_itr(adapter, - adapter->rx_itr, - adapter->total_rx_packets, - adapter->total_rx_bytes); + adapter->rx_itr = e1000_update_itr(adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); /* conservative mode (itr 3) eliminates the lowest_latency setting */ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) adapter->rx_itr = low_latency; current_itr = max(adapter->rx_itr, adapter->tx_itr); - switch (current_itr) { /* counts and packets in update_itr are dependent on these numbers */ + switch (current_itr) { case lowest_latency: new_itr = 70000; break; case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ + new_itr = 20000; /* aka hwitr = ~200 */ break; case bulk_latency: new_itr = 4000; @@ -2461,17 +2611,13 @@ set_itr_now: * increasing */ new_itr = new_itr > adapter->itr ? - min(adapter->itr + (new_itr >> 2), new_itr) : - new_itr; + min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; adapter->itr = new_itr; adapter->rx_ring->itr_val = new_itr; if (adapter->msix_entries) adapter->rx_ring->set_itr = 1; else - if (new_itr) - ew32(ITR, 1000000000 / (new_itr * 256)); - else - ew32(ITR, 0); + e1000e_write_itr(adapter, new_itr); } } @@ -2567,7 +2713,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight) return work_done; } -static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2592,7 +2739,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) return 0; } -static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2636,7 +2784,8 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) ew32(RCTL, rctl); if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { - e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; } } @@ -2696,24 +2845,23 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter) u16 vid = adapter->hw.mng_cookie.vlan_id; u16 old_vid = adapter->mng_vlan_id; - if (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { - e1000_vlan_rx_add_vid(netdev, vid); + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); adapter->mng_vlan_id = vid; } if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) - e1000_vlan_rx_kill_vid(netdev, old_vid); + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid); } static void e1000_restore_vlan(struct e1000_adapter *adapter) { u16 vid; - e1000_vlan_rx_add_vid(adapter->netdev, 0); + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - e1000_vlan_rx_add_vid(adapter->netdev, vid); + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } static void e1000_init_manageability_pt(struct e1000_adapter *adapter) @@ -2789,7 +2937,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; u64 tdba; - u32 tdlen, tarc; + u32 tdlen, tctl, 
tarc; /* Setup the HW Tx Head and Tail descriptor pointers */ tdba = tx_ring->dma; @@ -2809,6 +2957,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) if (adapter->flags2 & FLAG2_DMA_BURST) { u32 txdctl = er32(TXDCTL(0)); + txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | E1000_TXDCTL_WTHRESH); /* set up some performance related parameters to encourage the @@ -2826,6 +2975,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* erratum work around: set txdctl the same for both queues */ ew32(TXDCTL(1), er32(TXDCTL(0))); + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { tarc = er32(TARC(0)); /* set the speed mode bit, we'll clear it if we're not at @@ -2856,6 +3011,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* enable Report Status bit */ adapter->txd_cmd |= E1000_TXD_CMD_RS; + ew32(TCTL, tctl); + hw->mac.ops.config_collision_dist(hw); } @@ -2871,7 +3028,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) u32 rctl, rfctl; u32 pages = 0; - /* Workaround Si errata on PCHx - configure jumbo frame flow */ + /* Workaround Si errata on PCHx - configure jumbo frame flow. + * If jumbo frames not set, program related MAC/PHY registers + * to h/w defaults + */ if (hw->mac.type >= e1000_pch2lan) { s32 ret_val; @@ -2881,15 +3041,15 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); if (ret_val) - e_dbg("failed to enable jumbo frame workaround mode\n"); + e_dbg("failed to enable|disable jumbo frame workaround mode\n"); } /* Program MC offset vector base */ rctl = er32(RCTL); rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | - (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Do not Store bad packets */ rctl &= ~E1000_RCTL_SBP; @@ -2975,19 +3135,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* Enable Packet split descriptors */ rctl |= E1000_RCTL_DTYP_PS; - psrctl |= adapter->rx_ps_bsize0 >> - E1000_PSRCTL_BSIZE0_SHIFT; + psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; switch (adapter->rx_ps_pages) { case 3: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE3_SHIFT; + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; + /* fall-through */ case 2: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE2_SHIFT; + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; + /* fall-through */ case 1: - psrctl |= PAGE_SIZE >> - E1000_PSRCTL_BSIZE1_SHIFT; + psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; break; } @@ -2999,13 +3157,13 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* UPE and MPE will be handled by normal PROMISC logic * in e1000e_set_rx_mode */ - rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ - E1000_RCTL_BAM | /* RX All Bcast Pkts */ - E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ - rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ - E1000_RCTL_DPF | /* Allow filtered pause */ - E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow 
filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ /* Do not mess with E1000_CTRL_VME, it affects transmit as well, * and that breaks VLANs. */ @@ -3013,7 +3171,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) ew32(RCTL, rctl); /* just started the receive unit, no need to restart */ - adapter->flags &= ~FLAG_RX_RESTART_NOW; + adapter->flags &= ~FLAG_RESTART_NOW; } /** @@ -3108,18 +3266,24 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) rxcsum &= ~E1000_RXCSUM_TUOFL; ew32(RXCSUM, rxcsum); - if (adapter->hw.mac.type == e1000_pch2lan) { - /* With jumbo frames, excessive C-state transition - * latencies result in dropped transactions. - */ - if (adapter->netdev->mtu > ETH_DATA_LEN) { + /* With jumbo frames, excessive C-state transition latencies result + * in dropped transactions. + */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + u32 lat = + ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - + adapter->max_frame_size) * 8 / 1000; + + if (adapter->flags & FLAG_IS_ICH) { u32 rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl | 0x3); - pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); - } else { - pm_qos_update_request(&adapter->netdev->pm_qos_req, - PM_QOS_DEFAULT_VALUE); } + + pm_qos_update_request(&adapter->netdev->pm_qos_req, lat); + } else { + pm_qos_update_request(&adapter->netdev->pm_qos_req, + PM_QOS_DEFAULT_VALUE); } /* Enable Receives */ @@ -3156,7 +3320,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev) /* update_mc_addr_list expects a packed array of only addresses. */ i = 0; netdev_for_each_mc_addr(ha, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); hw->mac.ops.update_mc_addr_list(hw, mta_list, i); kfree(mta_list); @@ -3177,9 +3341,11 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - unsigned int rar_entries = hw->mac.rar_entry_count; + unsigned int rar_entries; int count = 0; + rar_entries = hw->mac.ops.rar_get_count(hw); + /* save a rar entry for our hardware address */ rar_entries--; @@ -3198,9 +3364,13 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev) * combining */ netdev_for_each_uc_addr(ha, netdev) { + int rval; + if (!rar_entries) break; - hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); + rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); + if (rval < 0) + return -ENOMEM; count++; } } @@ -3230,6 +3400,9 @@ static void e1000e_set_rx_mode(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; u32 rctl; + if (pm_runtime_suspended(netdev->dev.parent)) + return; + /* Check for Promiscuous and All Multicast modes */ rctl = er32(RCTL); @@ -3266,7 +3439,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev) ew32(RCTL, rctl); - if (netdev->features & NETIF_F_HW_VLAN_RX) + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) e1000e_vlan_strip_enable(adapter); else e1000e_vlan_strip_disable(adapter); @@ -3308,6 +3481,243 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) } /** + * e1000e_get_base_timinca - get default SYSTIM time increment attributes + * @adapter: board private structure + * @timinca: pointer to returned time increment attributes + * + * Get attributes for incrementing the System Time Register SYSTIML/H at + * the default base frequency, and set the cyclecounter shift value. 
+ **/ +s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) +{ + struct e1000_hw *hw = &adapter->hw; + u32 incvalue, incperiod, shift; + + /* Make sure clock is enabled on I217 before checking the frequency */ + if ((hw->mac.type == e1000_pch_lpt) && + !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { + u32 fextnvm7 = er32(FEXTNVM7); + + if (!(fextnvm7 & (1 << 0))) { + ew32(FEXTNVM7, fextnvm7 | (1 << 0)); + e1e_flush(); + } + } + + switch (hw->mac.type) { + case e1000_pch2lan: + case e1000_pch_lpt: + /* On I217, the clock frequency is 25MHz or 96MHz as + * indicated by the System Clock Frequency Indication + */ + if ((hw->mac.type != e1000_pch_lpt) || + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + /* Stable 96MHz frequency */ + incperiod = INCPERIOD_96MHz; + incvalue = INCVALUE_96MHz; + shift = INCVALUE_SHIFT_96MHz; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + break; + } + /* fall-through */ + case e1000_82574: + case e1000_82583: + /* Stable 25MHz frequency */ + incperiod = INCPERIOD_25MHz; + incvalue = INCVALUE_25MHz; + shift = INCVALUE_SHIFT_25MHz; + adapter->cc.shift = shift; + break; + default: + return -EINVAL; + } + + *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) | + ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK)); + + return 0; +} + +/** + * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable + * @adapter: board private structure + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". + **/ +static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, + struct hwtstamp_config *config) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 rxmtrl = 0; + u16 rxudp = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + s32 ret_val; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return -EINVAL; + + /* flags reserved for future extensions - must be zero */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + /* Also time stamps V2 L2 Path Delay Request/Response */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + /* Also time stamps V2 L2 Path Delay Request/Response. 
*/ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* Hardware cannot filter just V2 L4 Sync messages; + * fall-through to V2 (both L2 and L4) Sync. + */ + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* Hardware cannot filter just V2 L4 Delay Request messages; + * fall-through to V2 (both L2 and L4) Delay Request. + */ + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + /* Hardware cannot filter just V2 L4 or L2 Event messages; + * fall-through to all V2 (both L2 and L4) Events. + */ + case HWTSTAMP_FILTER_PTP_V2_EVENT: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* For V1, the hardware can only filter Sync messages or + * Delay Request messages but not both so fall-through to + * time stamp all packets. + */ + case HWTSTAMP_FILTER_ALL: + is_l2 = true; + is_l4 = true; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + adapter->hwtstamp_config = *config; + + /* enable/disable Tx h/w time stamping */ + regval = er32(TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + ew32(TSYNCTXCTL, regval); + if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != + (regval & E1000_TSYNCTXCTL_ENABLED)) { + e_err("Timesync Tx Control register not set as expected\n"); + return -EAGAIN; + } + + /* enable/disable Rx h/w time stamping */ + regval = er32(TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + ew32(TSYNCRXCTL, regval); + if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK)) != + (regval & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK))) { + e_err("Timesync Rx Control register not set as expected\n"); + return -EAGAIN; + } + + /* L2: define ethertype filter for time stamped packets */ + if (is_l2) + rxmtrl |= ETH_P_1588; + + /* define which PTP packets get time stamped */ + ew32(RXMTRL, rxmtrl); + + /* Filter by destination port */ + if (is_l4) { + rxudp = PTP_EV_PORT; + cpu_to_be16s(&rxudp); + } + ew32(RXUDP, rxudp); + + e1e_flush(); + + /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */ + er32(RXSTMPH); + er32(TXSTMPH); + + /* Get and set the System Time Register SYSTIM base frequency */ + ret_val = e1000e_get_base_timinca(adapter, ®val); + if (ret_val) + return ret_val; + ew32(TIMINCA, regval); + + /* reset the ns time counter */ + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + return 0; +} + +/** * e1000_configure - configure the hardware for Rx and Tx * @adapter: private board structure **/ @@ -3353,10 +3763,6 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter) */ static void e1000_power_down_phy(struct e1000_adapter *adapter) { - /* WoL is enabled */ - if (adapter->wol) - return; - if 
(adapter->hw.phy.ops.power_down) adapter->hw.phy.ops.power_down(&adapter->hw); } @@ -3398,8 +3804,7 @@ void e1000e_reset(struct e1000_adapter *adapter) * but don't include ethernet FCS because hardware appends it */ min_tx_space = (adapter->max_frame_size + - sizeof(struct e1000_tx_desc) - - ETH_FCS_LEN) * 2; + sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; min_tx_space = ALIGN(min_tx_space, 1024); min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */ @@ -3455,7 +3860,7 @@ void e1000e_reset(struct e1000_adapter *adapter) hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - adapter->max_frame_size)); - fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ fc->low_water = fc->high_water - 8; break; case e1000_pchlan: @@ -3464,23 +3869,28 @@ void e1000e_reset(struct e1000_adapter *adapter) */ if (adapter->netdev->mtu > ETH_DATA_LEN) { fc->high_water = 0x3500; - fc->low_water = 0x1500; + fc->low_water = 0x1500; } else { fc->high_water = 0x5000; - fc->low_water = 0x3000; + fc->low_water = 0x3000; } fc->refresh_time = 0x1000; break; case e1000_pch2lan: case e1000_pch_lpt: - fc->high_water = 0x05C20; - fc->low_water = 0x05048; - fc->pause_time = 0x0650; fc->refresh_time = 0x0400; - if (adapter->netdev->mtu > ETH_DATA_LEN) { - pba = 14; - ew32(PBA, pba); + + if (adapter->netdev->mtu <= ETH_DATA_LEN) { + fc->high_water = 0x05C20; + fc->low_water = 0x05048; + fc->pause_time = 0x0650; + break; } + + pba = 14; + ew32(PBA, pba); + fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; + fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; break; } @@ -3499,13 +3909,13 @@ void e1000e_reset(struct e1000_adapter *adapter) if ((adapter->max_frame_size * 2) > (pba << 10)) { if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { dev_info(&adapter->pdev->dev, - "Interrupt Throttle Rate turned off\n"); + "Interrupt Throttle Rate off\n"); adapter->flags2 |= FLAG2_DISABLE_AIM; e1000e_write_itr(adapter, 0); } } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { dev_info(&adapter->pdev->dev, - "Interrupt Throttle Rate turned on\n"); + "Interrupt Throttle Rate on\n"); adapter->flags2 &= ~FLAG2_DISABLE_AIM; adapter->itr = 20000; e1000e_write_itr(adapter, adapter->itr); @@ -3533,11 +3943,44 @@ void e1000e_reset(struct e1000_adapter *adapter) e1000e_reset_adaptive(hw); + /* initialize systim and reset the ns time counter */ + e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); + + /* Set EEE advertisement as appropriate */ + if (adapter->flags2 & FLAG2_HAS_EEE) { + s32 ret_val; + u16 adv_addr; + + switch (hw->phy.type) { + case e1000_phy_82579: + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + dev_err(&adapter->pdev->dev, + "Invalid PHY type setting EEE advertisement\n"); + return; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + dev_err(&adapter->pdev->dev, + "EEE advertisement - unable to acquire PHY\n"); + return; + } + + e1000_write_emi_reg_locked(hw, adv_addr, + hw->dev_spec.ich8lan.eee_disable ? 
+ 0 : adapter->eee_advert); + + hw->phy.ops.release(hw); + } + if (!netif_running(adapter->netdev) && - !test_bit(__E1000_TESTING, &adapter->state)) { + !test_bit(__E1000_TESTING, &adapter->state)) e1000_power_down_phy(adapter); - return; - } e1000_get_phy_info(hw); @@ -3604,7 +4047,12 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter) static void e1000e_update_stats(struct e1000_adapter *adapter); -void e1000e_down(struct e1000_adapter *adapter) +/** + * e1000e_down - quiesce the device and optionally reset the hardware + * @adapter: board private structure + * @reset: boolean flag to reset the hardware or not + */ +void e1000e_down(struct e1000_adapter *adapter, bool reset) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; @@ -3634,6 +4082,8 @@ void e1000e_down(struct e1000_adapter *adapter) e1000_irq_disable(adapter); + napi_synchronize(&adapter->napi); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); @@ -3650,12 +4100,14 @@ void e1000e_down(struct e1000_adapter *adapter) adapter->link_speed = 0; adapter->link_duplex = 0; - if (!pci_channel_offline(adapter->pdev)) - e1000e_reset(adapter); + /* Disable Si errata workaround on PCHx for jumbo frame flow */ + if ((hw->mac.type >= e1000_pch2lan) && + (adapter->netdev->mtu > ETH_DATA_LEN) && + e1000_lv_jumbo_workaround_ich8lan(hw, false)) + e_dbg("failed to disable jumbo frame workaround mode\n"); - /* TODO: for power management, we could drop the link and - * pci_disable_device here. - */ + if (reset && !pci_channel_offline(adapter->pdev)) + e1000e_reset(adapter); } void e1000e_reinit_locked(struct e1000_adapter *adapter) @@ -3663,12 +4115,55 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) might_sleep(); while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); - e1000e_down(adapter); + e1000e_down(adapter, true); e1000e_up(adapter); clear_bit(__E1000_RESETTING, &adapter->state); } /** + * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) + * @cc: cyclecounter structure + **/ +static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) +{ + struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, + cc); + struct e1000_hw *hw = &adapter->hw; + cycle_t systim, systim_next; + + /* latch SYSTIMH on read of SYSTIML */ + systim = (cycle_t)er32(SYSTIML); + systim |= (cycle_t)er32(SYSTIMH) << 32; + + if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { + u64 incvalue, time_delta, rem, temp; + int i; + + /* errata for 82574/82583 possible bad bits read from SYSTIMH/L + * check to see that the time is incrementing at a reasonable + * rate and is a multiple of incvalue + */ + incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; + for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { + /* latch SYSTIMH on read of SYSTIML */ + systim_next = (cycle_t)er32(SYSTIML); + systim_next |= (cycle_t)er32(SYSTIMH) << 32; + + time_delta = systim_next - systim; + temp = time_delta; + rem = do_div(temp, incvalue); + + systim = systim_next; + + if ((time_delta < E1000_82574_SYSTIM_EPSILON) && + (rem == 0)) + break; + } + } + return systim; +} + +/** * e1000_sw_init - Initialize general software structures (struct e1000_adapter) * @adapter: board private structure to initialize * @@ -3694,6 +4189,17 @@ static int e1000_sw_init(struct e1000_adapter *adapter) if (e1000_alloc_queues(adapter)) return -ENOMEM; + /* Setup hardware time stamping cyclecounter */ + if 
(adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + adapter->cc.read = e1000e_cyclecounter_read; + adapter->cc.mask = CLOCKSOURCE_MASK(64); + adapter->cc.mult = 1; + /* cc.shift set in e1000e_get_base_tininca() */ + + spin_lock_init(&adapter->systim_lock); + INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); + } + /* Explicitly disable IRQ since the NIC can be in any state. */ e1000_irq_disable(adapter); @@ -3706,7 +4212,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter) * @irq: interrupt number * @data: pointer to a network interface device structure **/ -static irqreturn_t e1000_intr_msi_test(int irq, void *data) +static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -3872,15 +4378,12 @@ static int e1000_open(struct net_device *netdev) e1000e_power_up_phy(adapter); adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; - if ((adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) e1000_update_mng_vlan(adapter); /* DMA latency requirement to workaround jumbo issue */ - if (adapter->hw.mac.type == e1000_pch2lan) - pm_qos_add_request(&adapter->netdev->pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt @@ -3915,7 +4418,7 @@ static int e1000_open(struct net_device *netdev) adapter->tx_hang_recheck = false; netif_start_queue(netdev); - adapter->idle_check = true; + hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); /* fire a link status change interrupt to start the watchdog */ @@ -3963,13 +4466,15 @@ static int e1000_close(struct net_device *netdev) pm_runtime_get_sync(&pdev->dev); - napi_disable(&adapter->napi); - if (!test_bit(__E1000_DOWN, &adapter->state)) { - e1000e_down(adapter); + e1000e_down(adapter, true); e1000_free_irq(adapter); + + /* Link status message must follow this format */ + pr_info("%s NIC Link is Down\n", adapter->netdev->name); } - e1000_power_down_phy(adapter); + + napi_disable(&adapter->napi); e1000e_free_tx_resources(adapter->tx_ring); e1000e_free_rx_resources(adapter->rx_ring); @@ -3977,9 +4482,9 @@ static int e1000_close(struct net_device *netdev) /* kill manageability vlan ID if supported, but not if a vlan with * the same ID is registered on the host OS (let 8021q kill it) */ - if (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) - e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); /* If AMT is enabled, let the firmware know that the network * interface is now closed @@ -3988,13 +4493,13 @@ static int e1000_close(struct net_device *netdev) !test_bit(__E1000_TESTING, &adapter->state)) e1000e_release_hw_control(adapter); - if (adapter->hw.mac.type == e1000_pch2lan) - pm_qos_remove_request(&adapter->netdev->pm_qos_req); + pm_qos_remove_request(&adapter->netdev->pm_qos_req); pm_runtime_put_sync(&pdev->dev); return 0; } + /** * e1000_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -4045,12 +4550,18 @@ static int e1000_set_mac(struct net_device *netdev, void *p) static void 
e1000e_update_phy_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, update_phy_task); + struct e1000_adapter, + update_phy_task); + struct e1000_hw *hw = &adapter->hw; if (test_bit(__E1000_DOWN, &adapter->state)) return; - e1000_get_phy_info(&adapter->hw); + e1000_get_phy_info(hw); + + /* Enable EEE on 82579 after link up */ + if (hw->phy.type >= e1000_phy_82579) + e1000_set_eee_pchlan(hw); } /** @@ -4062,7 +4573,7 @@ static void e1000e_update_phy_task(struct work_struct *work) **/ static void e1000_update_phy_info(unsigned long data) { - struct e1000_adapter *adapter = (struct e1000_adapter *) data; + struct e1000_adapter *adapter = (struct e1000_adapter *)data; if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -4168,7 +4679,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.crcerrs += er32(CRCERRS); adapter->stats.gprc += er32(GPRC); adapter->stats.gorc += er32(GORCL); - er32(GORCH); /* Clear gorc */ + er32(GORCH); /* Clear gorc */ adapter->stats.bprc += er32(BPRC); adapter->stats.mprc += er32(MPRC); adapter->stats.roc += er32(ROC); @@ -4201,7 +4712,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.xofftxc += er32(XOFFTXC); adapter->stats.gptc += er32(GPTC); adapter->stats.gotc += er32(GOTCL); - er32(GOTCH); /* Clear gotc */ + er32(GOTCH); /* Clear gotc */ adapter->stats.rnbc += er32(RNBC); adapter->stats.ruc += er32(RUC); @@ -4229,18 +4740,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) * our own version based on RUC and ROC */ netdev->stats.rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; netdev->stats.rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; + adapter->stats.roc; netdev->stats.rx_crc_errors = adapter->stats.crcerrs; netdev->stats.rx_frame_errors = adapter->stats.algnerrc; netdev->stats.rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ - netdev->stats.tx_errors = adapter->stats.ecol + - adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; netdev->stats.tx_aborted_errors = adapter->stats.ecol; netdev->stats.tx_window_errors = adapter->stats.latecol; netdev->stats.tx_carrier_errors = adapter->stats.tncrs; @@ -4251,6 +4760,17 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.mgptc += er32(MGTPTC); adapter->stats.mgprc += er32(MGTPRC); adapter->stats.mgpdc += er32(MGTPDC); + + /* Correctable ECC Errors */ + if (hw->mac.type == e1000_pch_lpt) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + } } /** @@ -4262,18 +4782,19 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_phy_regs *phy = &adapter->phy_regs; - if ((er32(STATUS) & E1000_STATUS_LU) && + if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && + (er32(STATUS) & E1000_STATUS_LU) && (adapter->hw.phy.media_type == e1000_media_type_copper)) { int ret_val; - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); - ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); - ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, 
&phy->advertise); - ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); - ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); - ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); - ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); - ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); + ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); + ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); + ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); + ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); + ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); + ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); + ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); + ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); if (ret_val) e_warn("Error reading PHY register\n"); } else { @@ -4300,9 +4821,8 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) u32 ctrl = er32(CTRL); /* Link status message must follow this format for user tools */ - printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", - adapter->netdev->name, - adapter->link_speed, + pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : (ctrl & E1000_CTRL_RFCE) ? "Rx" : @@ -4355,11 +4875,12 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter) { /* make sure the receive unit is started */ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && - (adapter->flags & FLAG_RX_RESTART_NOW)) { + (adapter->flags & FLAG_RESTART_NOW)) { struct e1000_hw *hw = &adapter->hw; u32 rctl = er32(RCTL); + ew32(RCTL, rctl | E1000_RCTL_EN); - adapter->flags &= ~FLAG_RX_RESTART_NOW; + adapter->flags &= ~FLAG_RESTART_NOW; } } @@ -4377,6 +4898,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) if (adapter->phy_hang_count > 1) { adapter->phy_hang_count = 0; + e_dbg("PHY appears hung - resetting\n"); schedule_work(&adapter->reset_task); } } @@ -4387,7 +4909,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) **/ static void e1000_watchdog(unsigned long data) { - struct e1000_adapter *adapter = (struct e1000_adapter *) data; + struct e1000_adapter *adapter = (struct e1000_adapter *)data; /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); @@ -4398,7 +4920,8 @@ static void e1000_watchdog(unsigned long data) static void e1000_watchdog_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, watchdog_task); + struct e1000_adapter, + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_phy_info *phy = &adapter->hw.phy; @@ -4432,23 +4955,30 @@ static void e1000_watchdog_task(struct work_struct *work) /* update snapshot of PHY registers on LSC */ e1000_phy_read_status(adapter); mac->ops.get_link_up_info(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_speed, + &adapter->link_duplex); e1000_print_link_info(adapter); + + /* check if SmartSpeed worked */ + e1000e_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, + "Link Speed was downgraded by SmartSpeed\n"); + /* On supported PHYs, check for duplex mismatch only * if link has autonegotiated at 10/100 half */ if ((hw->phy.type == e1000_phy_igp_3 || hw->phy.type == e1000_phy_bm) && - (hw->mac.autoneg == true) && + 
hw->mac.autoneg && (adapter->link_speed == SPEED_10 || adapter->link_speed == SPEED_100) && (adapter->link_duplex == HALF_DUPLEX)) { u16 autoneg_exp; - e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); + e1e_rphy(hw, MII_EXPANSION, &autoneg_exp); - if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) + if (!(autoneg_exp & EXPANSION_NWAY)) e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); } @@ -4471,6 +5001,7 @@ static void e1000_watchdog_task(struct work_struct *work) if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && !txb2b) { u32 tarc0; + tarc0 = er32(TARC(0)); tarc0 &= ~SPEED_MODE_BIT; ew32(TARC(0), tarc0); @@ -4521,18 +5052,21 @@ static void e1000_watchdog_task(struct work_struct *work) adapter->link_speed = 0; adapter->link_duplex = 0; /* Link status message must follow this format */ - printk(KERN_INFO "e1000e: %s NIC Link is Down\n", - adapter->netdev->name); + pr_info("%s NIC Link is Down\n", adapter->netdev->name); netif_carrier_off(netdev); if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); + /* 8000ES2LAN requires a Rx packet buffer work-around + * on link down event; reset the controller to flush + * the Rx packet buffer. + */ if (adapter->flags & FLAG_RX_NEEDS_RESTART) - schedule_work(&adapter->reset_task); + adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, - LINK_TIMEOUT); + LINK_TIMEOUT); } } @@ -4551,20 +5085,23 @@ link_up: adapter->gotc_old = adapter->stats.gotc; spin_unlock(&adapter->stats64_lock); - e1000e_update_adaptive(&adapter->hw); - + /* If the link is lost the controller stops DMA, but + * if there is queued Tx work it cannot be done. So + * reset the controller to flush the Tx packet buffers. + */ if (!netif_carrier_ok(netdev) && - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { - /* We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). - */ + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + adapter->flags |= FLAG_RESTART_NOW; + + /* If reset is necessary, do it outside of interrupt context. */ + if (adapter->flags & FLAG_RESTART_NOW) { schedule_work(&adapter->reset_task); /* return immediately since reset is imminent */ return; } + e1000e_update_adaptive(&adapter->hw); + /* Simple mode for Interrupt Throttle Rate (ITR) */ if (adapter->itr_setting == 4) { /* Symmetric Tx/Rx gets a reduced ITR=2000; @@ -4573,8 +5110,8 @@ link_up: */ u32 goc = (adapter->gotc + adapter->gorc) / 10000; u32 dif = (adapter->gotc > adapter->gorc ? - adapter->gotc - adapter->gorc : - adapter->gorc - adapter->gotc) / 10000; + adapter->gotc - adapter->gorc : + adapter->gorc - adapter->gotc) / 10000; u32 itr = goc > 0 ? 
(dif * 6000 / goc + 2000) : 8000; e1000e_write_itr(adapter, itr); @@ -4601,6 +5138,17 @@ link_up: if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) e1000e_check_82574_phy_workaround(adapter); + /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */ + if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) { + er32(RXSTMPH); + adapter->rx_hwtstamp_cleared++; + } else { + adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; + } + } + /* Reset the timer */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, @@ -4612,6 +5160,7 @@ link_up: #define E1000_TX_FLAGS_TSO 0x00000004 #define E1000_TX_FLAGS_IPV4 0x00000008 #define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_HWTSTAMP 0x00000020 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 #define E1000_TX_FLAGS_VLAN_SHIFT 16 @@ -4623,16 +5172,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) u32 cmd_length = 0; u16 ipcse = 0, mss; u8 ipcss, ipcso, tucss, tucso, hdr_len; + int err; if (!skb_is_gso(skb)) return 0; - if (skb_header_cloned(skb)) { - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - - if (err) - return err; - } + err = skb_cow_head(skb, 0); + if (err < 0) + return err; hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); mss = skb_shinfo(skb)->gso_size; @@ -4641,14 +5188,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); + 0, IPPROTO_TCP, 0); cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); ipcse = 0; } ipcss = skb_network_offset(skb); @@ -4657,19 +5204,19 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | - E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); i = tx_ring->next_to_use; context_desc = E1000_CONTEXT_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; - context_desc->lower_setup.ip_fields.ipcss = ipcss; - context_desc->lower_setup.ip_fields.ipcso = ipcso; - context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); context_desc->upper_setup.tcp_fields.tucss = tucss; context_desc->upper_setup.tcp_fields.tucso = tucso; context_desc->upper_setup.tcp_fields.tucse = 0; - context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); @@ -4695,7 +5242,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) __be16 protocol; if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; + return false; if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; @@ -4727,8 +5274,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) context_desc->lower_setup.ip_config = 0; 
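/* Editorial aside, not part of the patch: the "simple mode" ITR update in the
 * watchdog hunk above boils down to one integer formula.  The standalone
 * sketch below just re-runs that arithmetic with made-up byte counters so the
 * 2000/8000 endpoints are easy to see; the function name and sample values
 * are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t simple_mode_itr(uint32_t gotc, uint32_t gorc)
{
	uint32_t goc = (gotc + gorc) / 10000;	/* total Tx+Rx bytes, scaled down */
	uint32_t dif = (gotc > gorc ? gotc - gorc : gorc - gotc) / 10000;

	/* symmetric Tx/Rx tends toward 2000, fully one-sided traffic toward
	 * 8000, and an idle link gets the 8000 default
	 */
	return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
}

int main(void)
{
	printf("symmetric : %u\n", simple_mode_itr(500000, 500000)); /* 2000 */
	printf("one-sided : %u\n", simple_mode_itr(900000, 100000)); /* 6800 */
	printf("idle      : %u\n", simple_mode_itr(0, 0));           /* 8000 */
	return 0;
}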
context_desc->upper_setup.tcp_fields.tucss = css; - context_desc->upper_setup.tcp_fields.tucso = - css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.data = 0; context_desc->cmd_and_length = cpu_to_le32(cmd_len); @@ -4741,7 +5287,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) i = 0; tx_ring->next_to_use = i; - return 1; + return true; } static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, @@ -4801,7 +5347,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, - offset, size, DMA_TO_DEVICE); + offset, size, + DMA_TO_DEVICE); buffer_info->mapped_as_page = true; if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; @@ -4850,7 +5397,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) if (tx_flags & E1000_TX_FLAGS_TSO) { txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | - E1000_TXD_CMD_TSE; + E1000_TXD_CMD_TSE; txd_upper |= E1000_TXD_POPTS_TXSM << 8; if (tx_flags & E1000_TX_FLAGS_IPV4) @@ -4870,14 +5417,19 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) txd_lower &= ~(E1000_TXD_CMD_IFCS); + if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_EXTCMD_TSTAMP; + } + i = tx_ring->next_to_use; do { buffer_info = &tx_ring->buffer_info[i]; tx_desc = E1000_TX_DESC(*tx_ring, i); tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - tx_desc->lower.data = - cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->lower.data = cpu_to_le32(txd_lower | + buffer_info->length); tx_desc->upper.data = cpu_to_le32(txd_upper); i++; @@ -4915,24 +5467,23 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) { - struct e1000_hw *hw = &adapter->hw; + struct e1000_hw *hw = &adapter->hw; u16 length, offset; - if (vlan_tx_tag_present(skb)) { - if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && - (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) - return 0; - } + if (vlan_tx_tag_present(skb) && + !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && + (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) return 0; - if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) + if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) return 0; { - const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); struct udphdr *udp; if (ip->protocol != IPPROTO_UDP) @@ -5094,7 +5645,16 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, nr_frags); if (count) { - skb_tx_timestamp(skb); + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + !adapter->tx_hwtstamp_skb)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= E1000_TX_FLAGS_HWTSTAMP; + adapter->tx_hwtstamp_skb = skb_get(skb); + adapter->tx_hwtstamp_start = jiffies; + schedule_work(&adapter->tx_hwtstamp_work); + } else { + skb_tx_timestamp(skb); + } 
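/* Editorial aside, not part of the patch: SKBTX_HW_TSTAMP, tested in the
 * e1000_xmit_frame() hunk just above, is only set when an application has
 * requested hardware Tx timestamps on its socket.  A minimal userspace
 * sketch of that request follows; the fallback #define is an assumption
 * (asm-generic value) for older libc headers, and no interface binding or
 * error-queue handling is shown.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* asm-generic value; assumption for this sketch */
#endif

int main(void)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |	/* stamp the packet in the NIC */
		    SOF_TIMESTAMPING_RAW_HARDWARE;	/* report the raw HW clock value */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)))
		perror("SO_TIMESTAMPING");

	/* Sent-packet timestamps are then read back from the socket error
	 * queue (MSG_ERRQUEUE).  As the code above shows, e1000e keeps at
	 * most one such packet in flight for hardware stamping and falls
	 * back to skb_tx_timestamp() for the rest.
	 */
	close(fd);
	return 0;
}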
netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); @@ -5134,10 +5694,9 @@ static void e1000_reset_task(struct work_struct *work) if (test_bit(__E1000_DOWN, &adapter->state)) return; - if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && - (adapter->flags & FLAG_RX_RESTART_NOW))) { + if (!(adapter->flags & FLAG_RESTART_NOW)) { e1000e_dump(adapter); - e_err("Reset adapter\n"); + e_err("Reset adapter unexpectedly\n"); } e1000e_reinit_locked(adapter); } @@ -5150,7 +5709,7 @@ static void e1000_reset_task(struct work_struct *work) * Returns the address of the device statistics structure. **/ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) + struct rtnl_link_stats64 *stats) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5171,18 +5730,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, * our own version based on RUC and ROC */ stats->rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; - stats->rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; + stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; stats->rx_crc_errors = adapter->stats.crcerrs; stats->rx_frame_errors = adapter->stats.algnerrc; stats->rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ - stats->tx_errors = adapter->stats.ecol + - adapter->stats.latecol; + stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; stats->tx_aborted_errors = adapter->stats.ecol; stats->tx_window_errors = adapter->stats.latecol; stats->tx_carrier_errors = adapter->stats.tncrs; @@ -5203,7 +5759,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, static int e1000_change_mtu(struct net_device *netdev, int new_mtu) { struct e1000_adapter *adapter = netdev_priv(netdev); - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN; /* Jumbo frame support */ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && @@ -5233,8 +5789,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) adapter->max_frame_size = max_frame; e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; + + pm_runtime_get_sync(netdev->dev.parent); + if (netif_running(netdev)) - e1000e_down(adapter); + e1000e_down(adapter, true); /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next @@ -5251,15 +5810,17 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) /* adjust allocation if LPE protects us, and we aren't using SBP */ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || - (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN - + ETH_FCS_LEN; + + ETH_FCS_LEN; if (netif_running(netdev)) e1000e_up(adapter); else e1000e_reset(adapter); + pm_runtime_put_sync(netdev->dev.parent); + clear_bit(__E1000_RESETTING, &adapter->state); return 0; @@ -5323,6 +5884,65 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, return 0; } +/** + * e1000e_hwtstamp_ioctl - control hardware time stamping + * @netdev: network interface device structure + * @ifreq: interface request + * + 
* Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". + **/ +static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int ret_val; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + ret_val = e1000e_config_hwtstamp(adapter, &config); + if (ret_val) + return ret_val; + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* With V2 type filters which specify a Sync or Delay Request, + * Path Delay Request/Response messages are also time stamped + * by hardware so notify the caller the requested packets plus + * some others are time stamped. + */ + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + default: + break; + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, + sizeof(adapter->hwtstamp_config)) ? 
-EFAULT : 0; +} + static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { @@ -5330,6 +5950,10 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: case SIOCSMIIREG: return e1000_mii_ioctl(netdev, ifr, cmd); + case SIOCSHWTSTAMP: + return e1000e_hwtstamp_set(netdev, ifr); + case SIOCGHWTSTAMP: + return e1000e_hwtstamp_get(netdev, ifr); default: return -EOPNOTSUPP; } @@ -5338,9 +5962,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) { struct e1000_hw *hw = &adapter->hw; - u32 i, mac_reg; + u32 i, mac_reg, wuc; u16 phy_reg, wuc_enable; - int retval = 0; + int retval; /* copy MAC RARs to PHY RARs */ e1000_copy_rx_addrs_to_phy_ich8lan(hw); @@ -5375,7 +5999,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) phy_reg &= ~(BM_RCTL_MO_MASK); if (mac_reg & E1000_RCTL_MO_3) phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) - << BM_RCTL_MO_SHIFT); + << BM_RCTL_MO_SHIFT); if (mac_reg & E1000_RCTL_BAM) phy_reg |= BM_RCTL_BAM; if (mac_reg & E1000_RCTL_PMCF) @@ -5385,13 +6009,18 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) phy_reg |= BM_RCTL_RFCE; hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); + wuc = E1000_WUC_PME_EN; + if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC)) + wuc |= E1000_WUC_APME; + /* enable PHY wakeup in MAC register */ ew32(WUFC, wufc); - ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); + ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME | + E1000_WUC_PME_STATUS | wuc)); /* configure and enable PHY wakeup in PHY registers */ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); - hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc); /* activate PHY wakeup */ wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; @@ -5404,16 +6033,10 @@ release: return retval; } -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, - bool runtime) +static int e1000e_pm_freeze(struct device *dev) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; - /* Runtime suspend should only enable wakeup for link changes */ - u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; - int retval = 0; netif_device_detach(netdev); @@ -5424,14 +6047,28 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, usleep_range(10000, 20000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); - e1000e_down(adapter); + + /* Quiesce the device without resetting the hardware */ + e1000e_down(adapter, false); e1000_free_irq(adapter); } e1000e_reset_interrupt_capability(adapter); - retval = pci_save_state(pdev); - if (retval) - return retval; + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + return 0; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + /* Runtime suspend should only enable wakeup for link changes */ + u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; + int retval = 0; status = er32(STATUS); if (status & E1000_STATUS_LU) @@ -5449,10 +6086,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, } ctrl = er32(CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; @@ -5467,12 +6100,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, ew32(CTRL_EXT, ctrl_ext); } + if (!runtime) + e1000e_power_up_phy(adapter); + if (adapter->flags & FLAG_IS_ICH) e1000_suspend_workarounds_ich8lan(&adapter->hw); - /* Allow time for pending master requests to run */ - e1000e_disable_pcie_master(&adapter->hw); - if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { /* enable wakeup by the PHY */ retval = e1000_init_phy_wakeup(adapter, wufc); @@ -5486,106 +6119,136 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, } else { ew32(WUC, 0); ew32(WUFC, 0); + + e1000_power_down_phy(adapter); } - *enable_wake = !!wufc; + if (adapter->hw.phy.type == e1000_phy_igp_3) { + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + } else if (hw->mac.type == e1000_pch_lpt) { + if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + /* ULP does not support wake from unicast, multicast + * or broadcast. + */ + retval = e1000_enable_ulp_lpt_lp(hw, !runtime); - /* make sure adapter isn't asleep if manageability is enabled */ - if ((adapter->flags & FLAG_MNG_PT_ENABLED) || - (hw->mac.ops.check_mng_mode(hw))) - *enable_wake = true; + if (retval) + return retval; + } - if (adapter->hw.phy.type == e1000_phy_igp_3) - e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ e1000e_release_hw_control(adapter); - pci_disable_device(pdev); - - return 0; -} - -static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) -{ - if (sleep && wake) { - pci_prepare_to_sleep(pdev); - return; - } - - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); -} - -static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, - bool wake) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); + pci_clear_master(pdev); /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. To * prevent this we need to mask off the correctable errors on the * downstream port of the pci-e switch. + * + * We don't have the associated upstream bridge while assigning + * the PCI device into guest. For example, the KVM on power is + * one of the cases. 
*/ if (adapter->flags & FLAG_IS_QUAD_PORT) { struct pci_dev *us_dev = pdev->bus->self; u16 devctl; + if (!us_dev) + return 0; + pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_CERE)); - e1000_power_off(pdev, sleep, wake); + pci_save_state(pdev); + pci_prepare_to_sleep(pdev); pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); - } else { - e1000_power_off(pdev, sleep, wake); } + + return 0; } -#ifdef CONFIG_PCIEASPM -static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +/** + * e1000e_disable_aspm - Disable ASPM states + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * + * Some devices *must* have certain ASPM states disabled per hardware errata. + **/ +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) { + struct pci_dev *parent = pdev->bus->self; + u16 aspm_dis_mask = 0; + u16 pdev_aspmc, parent_aspmc; + + switch (state) { + case PCIE_LINK_STATE_L0S: + case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1: + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S; + /* fall-through - can't have L1 without L0s */ + case PCIE_LINK_STATE_L1: + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1; + break; + default: + return; + } + + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; + + if (parent) { + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, + &parent_aspmc); + parent_aspmc &= PCI_EXP_LNKCTL_ASPMC; + } + + /* Nothing to do if the ASPM states to be disabled already are */ + if (!(pdev_aspmc & aspm_dis_mask) && + (!parent || !(parent_aspmc & aspm_dis_mask))) + return; + + dev_info(&pdev->dev, "Disabling ASPM %s %s\n", + (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ? + "L0s" : "", + (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ? + "L1" : ""); + +#ifdef CONFIG_PCIEASPM pci_disable_link_state_locked(pdev, state); -} -#else -static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) -{ - /* Both device and parent should have the same ASPM setting. - * Disable ASPM in downstream component first and then upstream. + + /* Double-check ASPM control. If not disabled by the above, the + * BIOS is preventing that from happening (or CONFIG_PCIEASPM is + * not enabled); override by writing PCI config space directly. */ - pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state); + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; - if (pdev->bus->self) - pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL, - state); -} + if (!(aspm_dis_mask & pdev_aspmc)) + return; #endif -static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) -{ - dev_info(&pdev->dev, "Disabling ASPM %s %s\n", - (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", - (state & PCIE_LINK_STATE_L1) ? "L1" : ""); - __e1000e_disable_aspm(pdev, state); -} + /* Both device and parent should have the same ASPM setting. + * Disable ASPM in downstream component first and then upstream. 
+ */ + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask); -#ifdef CONFIG_PM -static bool e1000e_pm_ready(struct e1000_adapter *adapter) -{ - return !!adapter->tx_ring->buffer_info; + if (parent) + pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, + aspm_dis_mask); } +#ifdef CONFIG_PM static int __e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u16 aspm_disable_flag = 0; - u32 err; if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) aspm_disable_flag = PCIE_LINK_STATE_L0S; @@ -5594,16 +6257,7 @@ static int __e1000_resume(struct pci_dev *pdev) if (aspm_disable_flag) e1000e_disable_aspm(pdev, aspm_disable_flag); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); - - e1000e_set_interrupt_capability(adapter); - if (netif_running(netdev)) { - err = e1000_request_irq(adapter); - if (err) - return err; - } + pci_set_master(pdev); if (hw->mac.type >= e1000_pch2lan) e1000_resume_workarounds_pchlan(&adapter->hw); @@ -5617,24 +6271,25 @@ static int __e1000_resume(struct pci_dev *pdev) e1e_rphy(&adapter->hw, BM_WUS, &phy_data); if (phy_data) { e_info("PHY Wakeup cause - %s\n", - phy_data & E1000_WUS_EX ? "Unicast Packet" : - phy_data & E1000_WUS_MC ? "Multicast Packet" : - phy_data & E1000_WUS_BC ? "Broadcast Packet" : - phy_data & E1000_WUS_MAG ? "Magic Packet" : - phy_data & E1000_WUS_LNKC ? - "Link Status Change" : "other"); + phy_data & E1000_WUS_EX ? "Unicast Packet" : + phy_data & E1000_WUS_MC ? "Multicast Packet" : + phy_data & E1000_WUS_BC ? "Broadcast Packet" : + phy_data & E1000_WUS_MAG ? "Magic Packet" : + phy_data & E1000_WUS_LNKC ? + "Link Status Change" : "other"); } e1e_wphy(&adapter->hw, BM_WUS, ~0); } else { u32 wus = er32(WUS); + if (wus) { e_info("MAC Wakeup cause - %s\n", - wus & E1000_WUS_EX ? "Unicast Packet" : - wus & E1000_WUS_MC ? "Multicast Packet" : - wus & E1000_WUS_BC ? "Broadcast Packet" : - wus & E1000_WUS_MAG ? "Magic Packet" : - wus & E1000_WUS_LNKC ? "Link Status Change" : - "other"); + wus & E1000_WUS_EX ? "Unicast Packet" : + wus & E1000_WUS_MC ? "Multicast Packet" : + wus & E1000_WUS_BC ? "Broadcast Packet" : + wus & E1000_WUS_MAG ? "Magic Packet" : + wus & E1000_WUS_LNKC ? "Link Status Change" : + "other"); } ew32(WUS, ~0); } @@ -5643,11 +6298,6 @@ static int __e1000_resume(struct pci_dev *pdev) e1000_init_manageability_pt(adapter); - if (netif_running(netdev)) - e1000e_up(adapter); - - netif_device_attach(netdev); - /* If the controller has AMT, do not set DRV_LOAD until the interface * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. 
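Editorial aside, not part of the patch: on resume, the hunk above reports only
the first matching wakeup cause from the PHY (BM_WUS) or MAC (WUS) status
register, using a single ?: chain.  The small decoder below mirrors that
precedence in standalone form; the E1000_WUS_* values are assumptions included
only so the sketch compiles on its own and should be taken from the driver's
headers in real use.

#include <stdint.h>
#include <stdio.h>

/* assumed to mirror the driver's wakeup status bit definitions */
#define E1000_WUS_LNKC 0x00000001
#define E1000_WUS_MAG  0x00000002
#define E1000_WUS_EX   0x00000004
#define E1000_WUS_MC   0x00000008
#define E1000_WUS_BC   0x00000010

static const char *wake_cause(uint32_t wus)
{
	/* same precedence as the ?: chain in __e1000_resume() */
	return (wus & E1000_WUS_EX)   ? "Unicast Packet" :
	       (wus & E1000_WUS_MC)   ? "Multicast Packet" :
	       (wus & E1000_WUS_BC)   ? "Broadcast Packet" :
	       (wus & E1000_WUS_MAG)  ? "Magic Packet" :
	       (wus & E1000_WUS_LNKC) ? "Link Status Change" : "other";
}

int main(void)
{
	printf("%s\n", wake_cause(E1000_WUS_MAG));                 /* Magic Packet */
	printf("%s\n", wake_cause(E1000_WUS_MC | E1000_WUS_LNKC)); /* Multicast Packet */
	return 0;
}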
@@ -5659,94 +6309,116 @@ static int __e1000_resume(struct pci_dev *pdev) } #ifdef CONFIG_PM_SLEEP -static int e1000_suspend(struct device *dev) +static int e1000e_pm_thaw(struct device *dev) +{ + struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000e_set_interrupt_capability(adapter); + if (netif_running(netdev)) { + u32 err = e1000_request_irq(adapter); + + if (err) + return err; + + e1000e_up(adapter); + } + + netif_device_attach(netdev); + + return 0; +} + +static int e1000e_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - retval = __e1000_shutdown(pdev, &wake, false); - if (!retval) - e1000_complete_shutdown(pdev, true, wake); + e1000e_pm_freeze(dev); - return retval; + return __e1000_shutdown(pdev, false); } -static int e1000_resume(struct device *dev) +static int e1000e_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); + int rc; - if (e1000e_pm_ready(adapter)) - adapter->idle_check = true; + rc = __e1000_resume(pdev); + if (rc) + return rc; - return __e1000_resume(pdev); + return e1000e_pm_thaw(dev); } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_RUNTIME -static int e1000_runtime_suspend(struct device *dev) +static int e1000e_pm_runtime_idle(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - if (e1000e_pm_ready(adapter)) { - bool wake; - - __e1000_shutdown(pdev, &wake, true); - } + if (!e1000e_has_link(adapter)) + pm_schedule_suspend(dev, 5 * MSEC_PER_SEC); - return 0; + return -EBUSY; } -static int e1000_idle(struct device *dev) +static int e1000e_pm_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); + int rc; - if (!e1000e_pm_ready(adapter)) - return 0; + rc = __e1000_resume(pdev); + if (rc) + return rc; - if (adapter->idle_check) { - adapter->idle_check = false; - if (!e1000e_has_link(adapter)) - pm_schedule_suspend(dev, MSEC_PER_SEC); - } + if (netdev->flags & IFF_UP) + rc = e1000e_up(adapter); - return -EBUSY; + return rc; } -static int e1000_runtime_resume(struct device *dev) +static int e1000e_pm_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - if (!e1000e_pm_ready(adapter)) - return 0; + if (netdev->flags & IFF_UP) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + /* Down the device without resetting the hardware */ + e1000e_down(adapter, false); + } + + if (__e1000_shutdown(pdev, true)) { + e1000e_pm_runtime_resume(dev); + return -EBUSY; + } - adapter->idle_check = !dev->power.runtime_auto; - return __e1000_resume(pdev); + return 0; } #endif /* CONFIG_PM_RUNTIME */ #endif /* CONFIG_PM */ static void e1000_shutdown(struct pci_dev *pdev) { - bool wake = false; - - __e1000_shutdown(pdev, &wake, false); + e1000e_pm_freeze(&pdev->dev); - if (system_state == SYSTEM_POWER_OFF) - e1000_complete_shutdown(pdev, false, wake); + __e1000_shutdown(pdev, false); } #ifdef CONFIG_NET_POLL_CONTROLLER -static 
irqreturn_t e1000_intr_msix(int irq, void *data) +static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5797,7 +6469,7 @@ static void e1000_netpoll(struct net_device *netdev) e1000_intr_msi(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); break; - default: /* E1000E_INT_MODE_LEGACY */ + default: /* E1000E_INT_MODE_LEGACY */ disable_irq(adapter->pdev->irq); e1000_intr(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); @@ -5826,7 +6498,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_DISCONNECT; if (netif_running(netdev)) - e1000e_down(adapter); + e1000e_down(adapter, true); pci_disable_device(pdev); /* Request a slot slot reset. */ @@ -5838,7 +6510,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation - * resembles the first-half of the e1000_resume routine. + * resembles the first-half of the e1000e_pm_resume routine. */ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) { @@ -5862,9 +6534,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { - pci_set_master(pdev); pdev->state_saved = true; pci_restore_state(pdev); + pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); @@ -5885,7 +6557,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the - * second-half of the e1000_resume routine. + * second-half of the e1000e_pm_resume routine. */ static void e1000_io_resume(struct pci_dev *pdev) { @@ -5910,7 +6582,6 @@ static void e1000_io_resume(struct pci_dev *pdev) */ if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_get_hw_control(adapter); - } static void e1000_print_device_info(struct e1000_adapter *adapter) @@ -5924,7 +6595,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) e_info("(PCI Express:2.5GT/s:%s) %pM\n", /* bus width */ ((hw->bus.width == e1000_bus_width_pcie_x4) ? 
"Width x4" : - "Width x1"), + "Width x1"), /* MAC address */ netdev->dev_addr); e_info("Intel(R) PRO/%s Network Connection\n", @@ -5964,7 +6635,7 @@ static int e1000_set_features(struct net_device *netdev, if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) adapter->flags |= FLAG_TSO_FORCE; - if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | + if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_RXALL))) return 0; @@ -6034,7 +6705,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) resource_size_t flash_start, flash_len; static int cards_found; u16 aspm_disable_flag = 0; - int i, err, pci_using_dac; + int bars, i, err, pci_using_dac; u16 eeprom_data = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; @@ -6050,26 +6721,21 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; + pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); - goto err_dma; - } + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; } } - err = pci_request_selected_regions_exclusive(pdev, - pci_select_bars(pdev, IORESOURCE_MEM), - e1000e_driver_name); + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions_exclusive(pdev, bars, + e1000e_driver_name); if (err) goto err_pci_reg; @@ -6122,10 +6788,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_flashmap; } + /* Set default EEE advertisement */ + if (adapter->flags2 & FLAG2_HAS_EEE) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + /* construct the net_device struct */ - netdev->netdev_ops = &e1000e_netdev_ops; + netdev->netdev_ops = &e1000e_netdev_ops; e1000e_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; + netdev->watchdog_timeo = 5 * HZ; netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); @@ -6170,8 +6840,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Set initial default active device features */ netdev->features = (NETIF_F_SG | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | @@ -6185,7 +6855,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_RXALL; if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) - netdev->features |= NETIF_F_HW_VLAN_FILTER; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= (NETIF_F_SG | NETIF_F_TSO | @@ -6228,22 +6898,21 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "NVM Read Error while reading MAC address\n"); memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); - if (!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", - 
netdev->perm_addr); + netdev->dev_addr); err = -EIO; goto err_eeprom; } init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = e1000_watchdog; - adapter->watchdog_timer.data = (unsigned long) adapter; + adapter->watchdog_timer.data = (unsigned long)adapter; init_timer(&adapter->phy_info_timer); adapter->phy_info_timer.function = e1000_update_phy_info; - adapter->phy_info_timer.data = (unsigned long) adapter; + adapter->phy_info_timer.data = (unsigned long)adapter; INIT_WORK(&adapter->reset_task, e1000_reset_task); INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); @@ -6258,10 +6927,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->hw.fc.current_mode = e1000_fc_default; adapter->hw.phy.autoneg_advertised = 0x2f; - /* ring size defaults */ - adapter->rx_ring->count = E1000_DEFAULT_RXD; - adapter->tx_ring->count = E1000_DEFAULT_TXD; - /* Initial Wake on LAN setting - If APM wake is enabled in * the EEPROM, enable the ACPI Magic Packet filter */ @@ -6295,7 +6960,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* initialize the wol settings based on the eeprom settings */ adapter->wol = adapter->eeprom_wol; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + device_wakeup_enable(&pdev->dev); /* save off EEPROM version number */ e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); @@ -6318,6 +6987,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + /* init PTP hardware clock */ + e1000e_ptp_init(adapter); + e1000_print_device_info(adapter); if (pci_dev_run_wake(pdev)) @@ -6344,7 +7016,7 @@ err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -6366,6 +7038,8 @@ static void e1000_remove(struct pci_dev *pdev) struct e1000_adapter *adapter = netdev_priv(netdev); bool down = test_bit(__E1000_DOWN, &adapter->state); + e1000e_ptp_remove(adapter); + /* The timers may be rescheduled, so explicitly disable them * from being rescheduled. */ @@ -6380,8 +7054,13 @@ static void e1000_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->update_phy_task); cancel_work_sync(&adapter->print_hang_task); - if (!(netdev->flags & IFF_UP)) - e1000_power_down_phy(adapter); + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + cancel_work_sync(&adapter->tx_hwtstamp_work); + if (adapter->tx_hwtstamp_skb) { + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + } + } /* Don't lie to e1000_close() down the road. 
*/ if (!down) @@ -6404,7 +7083,7 @@ static void e1000_remove(struct pci_dev *pdev) if (adapter->hw.flash_address) iounmap(adapter->hw.flash_address); pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_select_bars(pdev, IORESOURCE_MEM)); free_netdev(netdev); @@ -6421,11 +7100,12 @@ static const struct pci_error_handlers e1000_err_handler = { .resume = e1000_io_resume, }; -static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { +static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), + board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, @@ -6493,18 +7173,27 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); -#ifdef CONFIG_PM static const struct dev_pm_ops e1000_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) - SET_RUNTIME_PM_OPS(e1000_runtime_suspend, - e1000_runtime_resume, e1000_idle) -}; +#ifdef CONFIG_PM_SLEEP + .suspend = e1000e_pm_suspend, + .resume = e1000e_pm_resume, + .freeze = e1000e_pm_freeze, + .thaw = e1000e_pm_thaw, + .poweroff = e1000e_pm_suspend, + .restore = e1000e_pm_resume, #endif + SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume, + e1000e_pm_runtime_idle) +}; /* PCI Device API Driver */ static struct pci_driver e1000_driver = { @@ -6512,11 +7201,9 @@ static struct pci_driver e1000_driver = { .id_table = e1000_pci_tbl, .probe = e1000_probe, .remove = e1000_remove, -#ifdef CONFIG_PM .driver = { .pm = &e1000_pm_ops, }, -#endif .shutdown = e1000_shutdown, .err_handler = &e1000_err_handler }; @@ -6530,9 +7217,10 @@ static struct pci_driver e1000_driver = { static int __init e1000_init_module(void) { int ret; + pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); - pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n"); + pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; @@ -6551,7 +7239,6 @@ static void __exit e1000_exit_module(void) } module_exit(e1000_exit_module); - MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL"); |
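Editorial aside, not part of the patch: the SIOCSHWTSTAMP/SIOCGHWTSTAMP
handling added earlier in this patch is driven from userspace with a struct
hwtstamp_config carried inside an ifreq.  A minimal caller is sketched below;
"eth0" is a placeholder interface name, and note that the driver may
legitimately hand back a broader filter than requested (HWTSTAMP_FILTER_SOME
for the V2 Sync/Delay_Req cases), exactly as the comment in
e1000e_hwtstamp_set() describes.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	/* The kernel copies the (possibly adjusted) config back, so cfg now
	 * holds the filter the hardware will actually honour.
	 */
	printf("granted rx_filter = %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}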
