Diffstat (limited to 'drivers/net/e1000e/netdev.c')

 drivers/net/e1000e/netdev.c | 866 ++++++++++++++++++++++++++++++++------
 1 file changed, 692 insertions(+), 174 deletions(-)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index dbf81788bb4..24507f3b8b1 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -26,6 +26,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -45,11 +47,12 @@
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
 #include <linux/aer.h>
 
 #include "e1000.h"
 
-#define DRV_VERSION "1.0.2-k2"
+#define DRV_VERSION "1.0.2-k4"
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -66,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
 	[board_pchlan]		= &e1000_pch_info,
 };
 
+struct e1000_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
+#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
+#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+	/* General Registers */
+	{E1000_CTRL, "CTRL"},
+	{E1000_STATUS, "STATUS"},
+	{E1000_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{E1000_ICR, "ICR"},
+
+	/* RX Registers */
+	{E1000_RCTL, "RCTL"},
+	{E1000_RDLEN, "RDLEN"},
+	{E1000_RDH, "RDH"},
+	{E1000_RDT, "RDT"},
+	{E1000_RDTR, "RDTR"},
+	{E1000_RXDCTL(0), "RXDCTL"},
+	{E1000_ERT, "ERT"},
+	{E1000_RDBAL, "RDBAL"},
+	{E1000_RDBAH, "RDBAH"},
+	{E1000_RDFH, "RDFH"},
+	{E1000_RDFT, "RDFT"},
+	{E1000_RDFHS, "RDFHS"},
+	{E1000_RDFTS, "RDFTS"},
+	{E1000_RDFPC, "RDFPC"},
+
+	/* TX Registers */
+	{E1000_TCTL, "TCTL"},
+	{E1000_TDBAL, "TDBAL"},
+	{E1000_TDBAH, "TDBAH"},
+	{E1000_TDLEN, "TDLEN"},
+	{E1000_TDH, "TDH"},
+	{E1000_TDT, "TDT"},
+	{E1000_TIDV, "TIDV"},
+	{E1000_TXDCTL(0), "TXDCTL"},
+	{E1000_TADV, "TADV"},
+	{E1000_TARC(0), "TARC"},
+	{E1000_TDFH, "TDFH"},
+	{E1000_TDFT, "TDFT"},
+	{E1000_TDFHS, "TDFHS"},
+	{E1000_TDFTS, "TDFTS"},
+	{E1000_TDFPC, "TDFPC"},
+
+	/* List Terminator */
+	{}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+	int n = 0;
+	char rname[16];
+	u32 regs[8];
+
+	switch (reginfo->ofs) {
+	case E1000_RXDCTL(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_RXDCTL(n));
+		break;
+	case E1000_TXDCTL(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_TXDCTL(n));
+		break;
+	case E1000_TARC(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_TARC(n));
+		break;
+	default:
+		printk(KERN_INFO "%-15s %08x\n",
+			reginfo->name, __er32(hw, reginfo->ofs));
+		return;
+	}
+
+	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+	printk(KERN_INFO "%-15s ", rname);
+	for (n = 0; n < 2; n++)
+		printk(KERN_CONT "%08x ", regs[n]);
+	printk(KERN_CONT "\n");
+}
+
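Aside: the table-plus-switch structure above keeps the per-queue special cases (RXDCTL, TXDCTL and TARC exist once per queue) out of the walk over e1000_reg_info_tbl. For reference, a standalone sketch of the same table-driven idiom; this is illustrative, not part of the patch, and read_reg() plus the offsets are made up:

	#include <stdio.h>
	#include <stdint.h>

	struct reg_info {
		uint32_t ofs;
		const char *name;
	};

	/* fake MMIO read standing in for __er32() */
	static uint32_t read_reg(uint32_t ofs)
	{
		return 0xe1000000u | ofs;
	}

	static const struct reg_info reg_tbl[] = {
		{0x00000, "CTRL"},
		{0x00008, "STATUS"},
		{}	/* {} terminator: a NULL name ends the walk */
	};

	int main(void)
	{
		const struct reg_info *r;

		/* same loop shape e1000e_dump() uses below */
		for (r = reg_tbl; r->name; r++)
			printf("%-15s %08x\n", r->name, read_reg(r->ofs));
		return 0;
	}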
+/*
+ * e1000e_dump - Print registers, tx-ring and rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_reg_info *reginfo;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc;
+	struct my_u0 { u64 a; u64 b; } *u0;
+	struct e1000_buffer *buffer_info;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	union e1000_rx_desc_packet_split *rx_desc_ps;
+	struct e1000_rx_desc *rx_desc;
+	struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+	u32 staterr;
+	int i = 0;
+
+	if (!netif_msg_hw(adapter))
+		return;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		printk(KERN_INFO "Device Name     state            "
+			"trans_start      last_rx\n");
+		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+			netdev->name,
+			netdev->state,
+			netdev->trans_start,
+			netdev->last_rx);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	printk(KERN_INFO " Register Name   Value\n");
+	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		e1000_regdump(hw, reginfo);
+	}
+
+	/* Print TX Ring Summary */
+	if (!netdev || !netif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
+		" leng ntw timestamp\n");
+	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+		0, tx_ring->next_to_use, tx_ring->next_to_clean,
+		(u64)buffer_info->dma,
+		buffer_info->length,
+		buffer_info->next_to_watch,
+		(u64)buffer_info->time_stamp);
+
+	/* Print TX Rings */
+	if (!netif_msg_tx_done(adapter))
+		goto rx_ring_summary;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+	 *
+	 * Legacy Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |    Buffer Address [63:0] (Reserved on Write Back)            |
+	 *   +--------------------------------------------------------------+
+	 * 8 | Special |  CSS  | Status |  CMD  |  CSO  |  Length           |
+	 *   +--------------------------------------------------------------+
+	 *   63      48 47   36 35    32 31   24 23   16 15                 0
+	 *
+	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+	 *   63     48 47   40 39      32 31         16 15   8 7            0
+	 *   +----------------------------------------------------------------+
+	 * 0 |  TUCSE  | TUCS0 |  TUCSS  |    IPCSE    | IPCS0 |    IPCSS     |
+	 *   +----------------------------------------------------------------+
+	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |     PAYLEN       |
+	 *   +----------------------------------------------------------------+
+	 *   63     48 47    40 39 36 35 32 31    24 23  20 19                0
+	 *
+	 * Extended Data Descriptor (DTYP=0x1)
+	 *   +----------------------------------------------------------------+
+	 * 0 |                   Buffer Address [63:0]                        |
+	 *   +----------------------------------------------------------------+
+	 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP |   DTALEN   |
+	 *   +----------------------------------------------------------------+
+	 *   63      48 47   40 39  36 35    32 31      24 23 20 19           0
+	 */
+	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
+		" [bi->dma       ] leng  ntw timestamp        bi->skb "
+		"<-- Legacy format\n");
+	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+		" [bi->dma       ] leng  ntw timestamp        bi->skb "
+		"<-- Ext Context format\n");
+	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
+		" [bi->dma       ] leng  ntw timestamp        bi->skb "
+		"<-- Ext Data format\n");
+	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		buffer_info = &tx_ring->buffer_info[i];
+		u0 = (struct my_u0 *)tx_desc;
+		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
+			"%04X  %3X %016llX %p",
+			(!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
+			 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+			le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+			(u64)buffer_info->dma, buffer_info->length,
+			buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+			buffer_info->skb);
+		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+			printk(KERN_CONT " NTC/U\n");
+		else if (i == tx_ring->next_to_use)
+			printk(KERN_CONT " NTU\n");
+		else if (i == tx_ring->next_to_clean)
+			printk(KERN_CONT " NTC\n");
+		else
+			printk(KERN_CONT "\n");
+
+		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+					16, 1, phys_to_virt(buffer_info->dma),
+					buffer_info->length, true);
+	}
+
+	/* Print RX Rings Summary */
+rx_ring_summary:
+	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	printk(KERN_INFO " %5d %5X %5X\n", 0,
+		rx_ring->next_to_use, rx_ring->next_to_clean);
+
+	/* Print RX Rings */
+	if (!netif_msg_rx_status(adapter))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+	switch (adapter->rx_ps_pages) {
+	case 1:
+	case 2:
+	case 3:
+		/* [Extended] Packet Split Receive Descriptor Format
+		 *
+		 *    +-----------------------------------------------------+
+		 *  0 |                Buffer Address 0 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 *  8 |                Buffer Address 1 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 * 16 |                Buffer Address 2 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 * 24 |                Buffer Address 3 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 */
+		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
+			"[buffer 1 63:0 ] "
+			"[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
+			"[bi->skb] <-- Ext Pkt Split format\n");
+		/* [Extended] Receive Descriptor (Write-Back) Format
+		 *
+		 *   63       48 47    32 31     13 12    8 7    4 3        0
+		 *   +------------------------------------------------------+
+		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
+		 *   | Checksum | Ident  |         | Queue |      |  Type   |
+		 *   +------------------------------------------------------+
+		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+		 *   +------------------------------------------------------+
+		 *   63       48 47    32 31            20 19               0
+		 */
+		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
+			"[vl   l0 ee  es] "
+			"[ l3  l2  l1 hs] [reserved      ] ---------------- "
+			"[bi->skb] <-- Ext Rx Write-Back format\n");
+		for (i = 0; i < rx_ring->count; i++) {
+			buffer_info = &rx_ring->buffer_info[i];
+			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+			u1 = (struct my_u1 *)rx_desc_ps;
+			staterr =
+			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+					"%016llX %016llX %016llX "
+					"---------------- %p", i,
+					le64_to_cpu(u1->a),
+					le64_to_cpu(u1->b),
+					le64_to_cpu(u1->c),
+					le64_to_cpu(u1->d),
+					buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+					"%016llX %016llX %016llX %016llX %p", i,
+					le64_to_cpu(u1->a),
+					le64_to_cpu(u1->b),
+					le64_to_cpu(u1->c),
+					le64_to_cpu(u1->d),
+					(u64)buffer_info->dma,
+					buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter))
+					print_hex_dump(KERN_INFO, "",
+						DUMP_PREFIX_ADDRESS, 16, 1,
+						phys_to_virt(buffer_info->dma),
+						adapter->rx_ps_bsize0, true);
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+		}
+		break;
+	default:
+	case 0:
+		/* Legacy Receive Descriptor Format
+		 *
+		 * +-----------------------------------------------------+
+		 * |                Buffer Address [63:0]                |
+		 * +-----------------------------------------------------+
+		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+		 * +-----------------------------------------------------+
+		 * 63       48 47    40 39      32 31         16 15      0
+		 */
+		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
+			"[vl er S cks ln] [bi->dma       ] [bi->skb] "
+			"<-- Legacy format\n");
+		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
+			rx_desc = E1000_RX_DESC(*rx_ring, i);
+			buffer_info = &rx_ring->buffer_info[i];
+			u0 = (struct my_u0 *)rx_desc;
+			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
+				"%016llX %p",
+				i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+				(u64)buffer_info->dma, buffer_info->skb);
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+			if (netif_msg_pktdata(adapter))
+				print_hex_dump(KERN_INFO, "",
+					DUMP_PREFIX_ADDRESS,
+					16, 1, phys_to_virt(buffer_info->dma),
+					adapter->rx_buffer_len, true);
+		}
+	}
+
+exit:
+	return;
+}
+
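Note that e1000e_dump() is entirely gated on the driver's message level: netif_msg_hw() guards the whole function, netif_msg_tx_done()/netif_msg_rx_status() guard the ring walks, and netif_msg_pktdata() guards the payload hex dumps. Those macros just test bits in adapter->msg_enable, the bitmap exposed through ethtool's msglvl setting. A quick sketch of the gating (kernel context assumed; demo_adapter is a hypothetical stand-in):

	#include <linux/netdevice.h>

	struct demo_adapter {
		u32 msg_enable;	/* conventional ethtool msglvl bitmap */
	};

	static void demo_dump(struct demo_adapter *adapter)
	{
		/* netif_msg_hw(adapter) expands to this bit test */
		if (!(adapter->msg_enable & NETIF_MSG_HW))
			return;

		/* ... dump registers ... */

		if (adapter->msg_enable & NETIF_MSG_PKTDATA)
			;	/* hex-dump buffer contents as well */
	}

In practice the bits are flipped from userspace, e.g. "ethtool -s eth0 msglvl 0xffff" before provoking a reset.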
 /**
  * e1000_desc_unused - calculate if we have unused descriptors
  **/
@@ -178,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		buffer_info->skb = skb;
 map_skb:
-		buffer_info->dma = pci_map_single(pdev, skb->data,
+		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 						  adapter->rx_buffer_len,
-						  PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+						  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 			dev_err(&pdev->dev, "RX DMA map failed\n");
 			adapter->rx_dma_failed++;
 			break;
@@ -190,26 +548,23 @@ map_skb:
 
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch. (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			writel(i, adapter->hw.hw_addr + rx_ring->tail);
+		}
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 	}
 
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-		if (i-- == 0)
-			i = (rx_ring->count - 1);
-
-		/*
-		 * Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch. (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64).
-		 */
-		wmb();
-		writel(i, adapter->hw.hw_addr + rx_ring->tail);
-	}
+	rx_ring->next_to_use = i;
 }
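This hunk is also the first of many that mechanically move the driver from the legacy PCI DMA wrappers to the generic DMA API: pci_map_single(pdev, ...) becomes dma_map_single(&pdev->dev, ...), PCI_DMA_FROMDEVICE/PCI_DMA_TODEVICE become DMA_FROM_DEVICE/DMA_TO_DEVICE, and pci_dma_mapping_error() becomes dma_mapping_error(). The shape of every such conversion in this patch, sketched once (buf and len are placeholders):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* before: dma = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	 *         if (pci_dma_mapping_error(pdev, dma)) ...
	 * after, using the generic DMA API on the underlying struct device: */
	static int demo_map(struct pci_dev *pdev, void *buf, size_t len,
			    dma_addr_t *dma)
	{
		*dma = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, *dma))
			return -EIO;	/* count the failure and bail out */
		return 0;
	}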
 /**
@@ -247,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
-			ps_page->dma = pci_map_page(pdev,
-						    ps_page->page,
-						    0, PAGE_SIZE,
-						    PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+			ps_page->dma = dma_map_page(&pdev->dev,
+						    ps_page->page,
+						    0, PAGE_SIZE,
+						    DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev,
+					      ps_page->dma)) {
 				dev_err(&adapter->pdev->dev,
 					"RX DMA page map failed\n");
 				adapter->rx_dma_failed++;
@@ -276,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 		}
 
 		buffer_info->skb = skb;
-		buffer_info->dma = pci_map_single(pdev, skb->data,
+		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 						  adapter->rx_ps_bsize0,
-						  PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+						  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 			dev_err(&pdev->dev, "RX DMA map failed\n");
 			adapter->rx_dma_failed++;
 			/* cleanup skb */
@@ -290,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
 		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch. (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+		}
+
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
@@ -297,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 	}
 
 no_buffers:
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-
-		if (!(i--))
-			i = (rx_ring->count - 1);
-
-		/*
-		 * Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch. (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64).
-		 */
-		wmb();
-		/*
-		 * Hardware increments by 16 bytes, but packet split
-		 * descriptors are 32 bytes...so we increment tail
-		 * twice as much.
-		 */
-		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
-	}
+	rx_ring->next_to_use = i;
 }
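Both allocation paths now nudge the hardware every E1000_RX_BUFFER_WRITE descriptors instead of once after the loop, which keeps the ring from running dry on large refills. The mask test requires the batch size to be a power of two, and the packet-split path still writes i<<1 because those descriptors are 32 bytes while the tail register counts 16-byte units (the comment explaining that was dropped along with the old end-of-loop tail write). A runnable check of which iterations trigger a tail write, assuming a batch of 16:

	#include <stdio.h>

	#define RX_BUFFER_WRITE 16	/* stand-in; must be a power of two */

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < 64; i++) {
			/* mirrors: !(i & (E1000_RX_BUFFER_WRITE - 1)) */
			if (!(i & (RX_BUFFER_WRITE - 1)))
				printf("i=%2u: wmb(); writel(tail)\n", i);
		}
		return 0;	/* fires at i = 0, 16, 32, 48 */
	}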
 /**
@@ -366,10 +714,10 @@ check_page:
 		}
 
 		if (!buffer_info->dma)
-			buffer_info->dma = pci_map_page(pdev,
+			buffer_info->dma = dma_map_page(&pdev->dev,
 							buffer_info->page, 0,
 							PAGE_SIZE,
-							PCI_DMA_FROMDEVICE);
+							DMA_FROM_DEVICE);
 
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -443,10 +791,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		cleaned = 1;
 		cleaned_count++;
-		pci_unmap_single(pdev,
+		dma_unmap_single(&pdev->dev,
 				 buffer_info->dma,
 				 adapter->rx_buffer_len,
-				 PCI_DMA_FROMDEVICE);
+				 DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		length = le16_to_cpu(rx_desc->length);
@@ -547,12 +895,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev, buffer_info->dma,
-				       buffer_info->length, PCI_DMA_TODEVICE);
+			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+				       buffer_info->length, DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev, buffer_info->dma,
-					 buffer_info->length,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+					 buffer_info->length, DMA_TO_DEVICE);
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
@@ -643,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 		cleaned = (i == eop);
 
 		if (cleaned) {
-			struct sk_buff *skb = buffer_info->skb;
-			unsigned int segs, bytecount;
-			segs = skb_shinfo(skb)->gso_segs ?: 1;
-			/* multiply data chunks by size of headers */
-			bytecount = ((segs - 1) * skb_headlen(skb)) +
-				    skb->len;
-			total_tx_packets += segs;
-			total_tx_bytes += bytecount;
+			total_tx_packets += buffer_info->segs;
+			total_tx_bytes += buffer_info->bytecount;
 		}
 
 		e1000_put_txbuf(adapter, buffer_info);
@@ -753,9 +1094,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
 		cleaned = 1;
 		cleaned_count++;
-		pci_unmap_single(pdev, buffer_info->dma,
+		dma_unmap_single(&pdev->dev, buffer_info->dma,
 				 adapter->rx_ps_bsize0,
-				 PCI_DMA_FROMDEVICE);
+				 DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		/* see !EOP comment in other rx routine */
@@ -811,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				 * kmap_atomic, so we can't hold the mapping
 				 * very long
 				 */
-				pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
-					PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+					PAGE_SIZE, DMA_FROM_DEVICE);
 				vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
 				memcpy(skb_tail_pointer(skb), vaddr, l1);
 				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-				pci_dma_sync_single_for_device(pdev, ps_page->dma,
-					PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+					PAGE_SIZE, DMA_FROM_DEVICE);
 
 				/* remove the CRC */
 				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -834,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				break;
 
 			ps_page = &buffer_info->ps_pages[j];
-			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-				       PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
 			ps_page->dma = 0;
 			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
 			ps_page->page = NULL;
@@ -953,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 
 		cleaned = true;
 		cleaned_count++;
-		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+			       DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
 		length = le16_to_cpu(rx_desc->length);
@@ -1090,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
 			if (adapter->clean_rx == e1000_clean_rx_irq)
-				pci_unmap_single(pdev, buffer_info->dma,
+				dma_unmap_single(&pdev->dev, buffer_info->dma,
 						 adapter->rx_buffer_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
-				pci_unmap_page(pdev, buffer_info->dma,
+				dma_unmap_page(&pdev->dev, buffer_info->dma,
 					       PAGE_SIZE,
-					       PCI_DMA_FROMDEVICE);
+					       DMA_FROM_DEVICE);
 			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
-				pci_unmap_single(pdev, buffer_info->dma,
+				dma_unmap_single(&pdev->dev, buffer_info->dma,
 						 adapter->rx_ps_bsize0,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 		}
 
@@ -1118,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 			ps_page = &buffer_info->ps_pages[j];
 			if (!ps_page->page)
 				break;
-			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
-				       PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
 			ps_page->dma = 0;
 			put_page(ps_page->page);
 			ps_page->page = NULL;
@@ -1426,8 +1767,6 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
 		pci_disable_msi(adapter->pdev);
 		adapter->flags &= ~FLAG_MSI_ENABLED;
 	}
-
-	return;
 }
 
 /**
@@ -1479,8 +1818,6 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
 		/* Don't do anything; this is the system default */
 		break;
 	}
-
-	return;
 }
 
 /**
@@ -2185,10 +2522,10 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
 	}
 }
 
-static void e1000_init_manageability(struct e1000_adapter *adapter)
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 manc, manc2h;
+	u32 manc, manc2h, mdef, i, j;
 
 	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
 		return;
@@ -2202,10 +2539,49 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
 	 */
 	manc |= E1000_MANC_EN_MNG2HOST;
 	manc2h = er32(MANC2H);
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
-	manc2h |= E1000_MNG2HOST_PORT_623;
-	manc2h |= E1000_MNG2HOST_PORT_664;
+
+	switch (hw->mac.type) {
+	default:
+		manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		/*
+		 * Check if IPMI pass-through decision filter already exists;
+		 * if so, enable it.
+		 */
+		for (i = 0, j = 0; i < 8; i++) {
+			mdef = er32(MDEF(i));
+
+			/* Ignore filters with anything other than IPMI ports */
+			if (mdef & !(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+				continue;
+
+			/* Enable this decision filter in MANC2H */
+			if (mdef)
+				manc2h |= (1 << i);
+
+			j |= mdef;
+		}
+
+		if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+			break;
+
+		/* Create new decision filter in an empty filter */
+		for (i = 0, j = 0; i < 8; i++)
+			if (er32(MDEF(i)) == 0) {
+				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+					       E1000_MDEF_PORT_664));
+				manc2h |= (1 << 1);
+				j++;
+				break;
+			}
+
+		if (!j)
+			e_warn("Unable to create IPMI pass-through filter\n");
+		break;
+	}
+
 	ew32(MANC2H, manc2h);
 	ew32(MANC, manc);
 }
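Two expressions in the 82574/82583 branch above look suspect as committed: "mdef & !(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)" applies logical rather than bitwise negation (the operand is nonzero, so the test is always false and never skips a filter), and the slot-claiming path sets "manc2h |= (1 << 1)" where "(1 << i)" appears intended. A corrected sketch of the scan, under the assumption that the intent is to enable only filters that match nothing but the IPMI ports:

	/* kernel-context fragment; er32/ew32/MDEF()/E1000_MDEF_* as in e1000e */
	u32 manc2h = er32(MANC2H), mdef, found = 0;
	int i;

	for (i = 0; i < 8; i++) {
		mdef = er32(MDEF(i));

		/* skip filters matching anything besides the IPMI ports */
		if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
			continue;

		if (mdef)
			manc2h |= (1 << i);	/* enable in MANC2H */
		found |= mdef;
	}

	if (found != (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) {
		/* no existing filter covers both ports: claim an empty slot */
		for (i = 0; i < 8; i++) {
			if (!er32(MDEF(i))) {
				ew32(MDEF(i), E1000_MDEF_PORT_623 |
					      E1000_MDEF_PORT_664);
				manc2h |= (1 << i);	/* not (1 << 1) */
				break;
			}
		}
	}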
@@ -2524,12 +2900,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 		 * excessive C-state transition latencies result in
 		 * dropped transactions.
 		 */
-		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-					  adapter->netdev->name, 55);
+		pm_qos_update_request(
+			adapter->netdev->pm_qos_req, 55);
 	} else {
-		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
-					  adapter->netdev->name,
-					  PM_QOS_DEFAULT_VALUE);
+		pm_qos_update_request(
+			adapter->netdev->pm_qos_req,
+			PM_QOS_DEFAULT_VALUE);
 	}
 }
 
@@ -2565,7 +2941,7 @@ static void e1000_set_multi(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u8  *mta_list;
 	u32 rctl;
 	int i;
@@ -2597,9 +2973,8 @@ static void e1000_set_multi(struct net_device *netdev)
 
 		/* prepare a packed array of only addresses. */
 		i = 0;
-		netdev_for_each_mc_addr(mc_ptr, netdev)
-			memcpy(mta_list + (i++ * ETH_ALEN),
-			       mc_ptr->dmi_addr, ETH_ALEN);
+		netdev_for_each_mc_addr(ha, netdev)
+			memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
 		e1000_update_mc_addr_list(hw, mta_list, i);
 		kfree(mta_list);
@@ -2621,7 +2996,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
 	e1000_set_multi(adapter->netdev);
 
 	e1000_restore_vlan(adapter);
-	e1000_init_manageability(adapter);
+	e1000_init_manageability_pt(adapter);
 
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
@@ -2755,6 +3130,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 			fc->high_water = 0x5000;
 			fc->low_water  = 0x3000;
 		}
+		fc->refresh_time = 0x1000;
 	} else {
 		if ((adapter->flags & FLAG_HAS_ERT) &&
 		    (adapter->netdev->mtu > ETH_DATA_LEN))
@@ -2792,10 +3168,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	if (mac->ops.init_hw(hw))
 		e_err("Hardware Error\n");
 
-	/* additional part of the flow-control workaround above */
-	if (hw->mac.type == e1000_pchlan)
-		ew32(FCRTV_PCH, 0x1000);
-
 	e1000_update_mng_vlan(adapter);
 
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -2824,8 +3196,8 @@ int e1000e_up(struct e1000_adapter *adapter)
 
 	/* DMA latency requirement to workaround early-receive/jumbo issue */
 	if (adapter->flags & FLAG_HAS_ERT)
-		pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
-				       adapter->netdev->name,
+		adapter->netdev->pm_qos_req =
+			pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
 				       PM_QOS_DEFAULT_VALUE);
 
 	/* hardware has been reset, we need to reload some things */
@@ -2841,7 +3213,11 @@ int e1000e_up(struct e1000_adapter *adapter)
 	netif_wake_queue(adapter->netdev);
 
 	/* fire a link change interrupt to start the watchdog */
-	ew32(ICS, E1000_ICS_LSC);
+	if (adapter->msix_entries)
+		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+	else
+		ew32(ICS, E1000_ICS_LSC);
+
 	return 0;
 }
 
@@ -2887,9 +3263,11 @@ void e1000e_down(struct e1000_adapter *adapter)
 	e1000_clean_tx_ring(adapter);
 	e1000_clean_rx_ring(adapter);
 
-	if (adapter->flags & FLAG_HAS_ERT)
-		pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
-					  adapter->netdev->name);
+	if (adapter->flags & FLAG_HAS_ERT) {
+		pm_qos_remove_request(
+			adapter->netdev->pm_qos_req);
+		adapter->netdev->pm_qos_req = NULL;
+	}
 
 	/*
 	 * TODO: for power management, we could drop the link and
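The pm_qos calls above move from the old string-keyed API (pm_qos_add_requirement() with a class plus a name) to the handle-based one: pm_qos_add_request() hands back a request object that is stored (here on the netdev) and later passed to pm_qos_update_request()/pm_qos_remove_request(), removing the per-name lookup and letting a driver hold several independent requests. The lifecycle, condensed, assuming the transitional 2.6.33-era prototypes this tree uses where add returns the request pointer:

	#include <linux/pm_qos_params.h>

	struct pm_qos_request_list *req;	/* handle type per this tree */

	req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
	pm_qos_update_request(req, 55);		/* cap CPU exit latency at 55 us */
	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);	/* relax it */
	pm_qos_remove_request(req);		/* at teardown */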
@@ -3083,12 +3461,15 @@ static int e1000_open(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
 	int err;
 
 	/* disallow open during test */
 	if (test_bit(__E1000_TESTING, &adapter->state))
 		return -EBUSY;
 
+	pm_runtime_get_sync(&pdev->dev);
+
 	netif_carrier_off(netdev);
 
 	/* allocate transmit descriptors */
@@ -3101,6 +3482,15 @@ static int e1000_open(struct net_device *netdev)
 	if (err)
 		goto err_setup_rx;
 
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now open and reset the part to a known state.
+	 */
+	if (adapter->flags & FLAG_HAS_AMT) {
+		e1000_get_hw_control(adapter);
+		e1000e_reset(adapter);
+	}
+
 	e1000e_power_up_phy(adapter);
 
 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -3109,13 +3499,6 @@ static int e1000_open(struct net_device *netdev)
 		e1000_update_mng_vlan(adapter);
 
 	/*
-	 * If AMT is enabled, let the firmware know that the network
-	 * interface is now open
-	 */
-	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_get_hw_control(adapter);
-
-	/*
 	 * before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 	 * as soon as we call pci_request_irq, so we have to setup our
@@ -3149,8 +3532,14 @@ static int e1000_open(struct net_device *netdev)
 
 	netif_start_queue(netdev);
 
+	adapter->idle_check = true;
+	pm_runtime_put(&pdev->dev);
+
 	/* fire a link status change interrupt to start the watchdog */
-	ew32(ICS, E1000_ICS_LSC);
+	if (adapter->msix_entries)
+		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+	else
+		ew32(ICS, E1000_ICS_LSC);
 
 	return 0;
 
@@ -3162,6 +3551,7 @@ err_setup_rx:
 	e1000e_free_tx_resources(adapter);
err_setup_tx:
 	e1000e_reset(adapter);
+	pm_runtime_put_sync(&pdev->dev);
 
 	return err;
 }
@@ -3180,11 +3570,17 @@ err_setup_tx:
 static int e1000_close(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
 
 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
-	e1000e_down(adapter);
+
+	pm_runtime_get_sync(&pdev->dev);
+
+	if (!test_bit(__E1000_DOWN, &adapter->state)) {
+		e1000e_down(adapter);
+		e1000_free_irq(adapter);
+	}
+
 	e1000_power_down_phy(adapter);
-	e1000_free_irq(adapter);
 
 	e1000e_free_tx_resources(adapter);
 	e1000e_free_rx_resources(adapter);
@@ -3206,6 +3602,8 @@ static int e1000_close(struct net_device *netdev)
 	if (adapter->flags & FLAG_HAS_AMT)
 		e1000_release_hw_control(adapter);
 
+	pm_runtime_put_sync(&pdev->dev);
+
 	return 0;
 }
 /**
@@ -3550,6 +3948,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 
 	link = e1000e_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link) {
+		/* Cancel scheduled suspend requests. */
+		pm_runtime_resume(netdev->dev.parent);
+
 		e1000e_enable_receives(adapter);
 		goto link_up;
 	}
@@ -3561,6 +3962,10 @@ static void e1000_watchdog_task(struct work_struct *work)
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			bool txb2b = 1;
+
+			/* Cancel scheduled suspend requests. */
+			pm_runtime_resume(netdev->dev.parent);
+
 			/* update snapshot of PHY registers on LSC */
 			e1000_phy_read_status(adapter);
 			mac->ops.get_link_up_info(&adapter->hw,
@@ -3670,6 +4075,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 
 			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
 				schedule_work(&adapter->reset_task);
+			else
+				pm_schedule_suspend(netdev->dev.parent,
+							LINK_TIMEOUT);
 		}
 	}
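The open/close changes follow the usual runtime-PM bracketing: take a synchronous reference before touching the hardware so the device cannot runtime-suspend mid-transition, then drop it once the interface is up (open) or torn down (close). A condensed sketch of the pattern, with the driver-specific bring-up replaced by hypothetical demo_* helpers:

	static int demo_open(struct net_device *netdev)
	{
		struct pci_dev *pdev = demo_get_pdev(netdev);	/* hypothetical */
		int err;

		pm_runtime_get_sync(&pdev->dev);	/* wake and pin the device */

		err = demo_bring_up(netdev);		/* hypothetical */
		if (err) {
			pm_runtime_put_sync(&pdev->dev);
			return err;
		}

		pm_runtime_put(&pdev->dev);	/* interface up: allow suspend again */
		return 0;
	}

The watchdog's pm_runtime_resume()/pm_schedule_suspend() calls above then cancel or re-arm the deferred suspend as the link comes and goes.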
@@ -3705,6 +4113,22 @@ link_up:
 		}
 	}
 
+	/* Simple mode for Interrupt Throttle Rate (ITR) */
+	if (adapter->itr_setting == 4) {
+		/*
+		 * Symmetric Tx/Rx gets a reduced ITR=2000;
+		 * Total asymmetrical Tx or Rx gets ITR=8000;
+		 * everyone else is between 2000-8000.
+		 */
+		u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+		u32 dif = (adapter->gotc > adapter->gorc ?
+			    adapter->gotc - adapter->gorc :
+			    adapter->gorc - adapter->gotc) / 10000;
+		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+		ew32(ITR, 1000000000 / (itr * 256));
+	}
+
 	/* Cause software interrupt to ensure Rx ring is cleaned */
 	if (adapter->msix_entries)
 		ew32(ICS, adapter->rx_ring->ims_val);
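itr_setting == 4 derives the interrupt rate from the previous interval's byte counters: symmetric Tx/Rx collapses to 2000 interrupts/s, completely one-sided traffic to 8000, and the ITR register is then loaded with the minimum inter-interrupt gap in 256 ns units. The arithmetic, runnable standalone:

	#include <stdio.h>
	#include <stdint.h>

	/* mirrors the "simple mode" computation in the hunk above */
	static uint32_t simple_itr(uint32_t gotc, uint32_t gorc)
	{
		uint32_t goc = (gotc + gorc) / 10000;
		uint32_t dif = (gotc > gorc ? gotc - gorc : gorc - gotc) / 10000;

		return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
	}

	int main(void)
	{
		/* symmetric load -> 2000 ints/s; fully one-sided -> 8000 */
		uint32_t cases[][2] = { {5000000, 5000000}, {10000000, 0} };

		for (unsigned int i = 0; i < 2; i++) {
			uint32_t itr = simple_itr(cases[i][0], cases[i][1]);

			/* ITR register: interval expressed in 256 ns units */
			printf("itr=%u ints/s -> ITR=%u\n", (unsigned)itr,
			       (unsigned)(1000000000u / (itr * 256)));
		}
		return 0;	/* prints ITR=1953 and ITR=488 */
	}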
@@ -3879,7 +4303,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	unsigned int len = skb_headlen(skb);
 	unsigned int offset = 0, size, count = 0, i;
-	unsigned int f;
+	unsigned int f, bytecount, segs;
 
 	i = tx_ring->next_to_use;
 
@@ -3890,10 +4314,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 		buffer_info->length = size;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
-		buffer_info->dma = pci_map_single(pdev, skb->data + offset,
-						  size, PCI_DMA_TODEVICE);
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data + offset,
+						  size, DMA_TO_DEVICE);
 		buffer_info->mapped_as_page = false;
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
 
 		len -= size;
@@ -3925,11 +4350,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
 			buffer_info->next_to_watch = i;
-			buffer_info->dma = pci_map_page(pdev, frag->page,
+			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
 							offset, size,
-							PCI_DMA_TODEVICE);
+							DMA_TO_DEVICE);
 			buffer_info->mapped_as_page = true;
-			if (pci_dma_mapping_error(pdev, buffer_info->dma))
+			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 				goto dma_error;
 
 			len -= size;
@@ -3938,7 +4363,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 		}
 	}
 
+	segs = skb_shinfo(skb)->gso_segs ?: 1;
+	/* multiply data chunks by size of headers */
+	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
 	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[i].segs = segs;
+	tx_ring->buffer_info[i].bytecount = bytecount;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return count;
@@ -4105,7 +4536,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	unsigned int max_per_txd = E1000_MAX_PER_TXD;
 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
-	unsigned int len = skb->len - skb->data_len;
+	unsigned int len = skb_headlen(skb);
 	unsigned int nr_frags;
 	unsigned int mss;
 	int count = 0;
@@ -4155,7 +4586,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;
 		}
-		len = skb->len - skb->data_len;
+		len = skb_headlen(skb);
 	}
 }
@@ -4241,6 +4672,8 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
 
+	e1000e_dump(adapter);
+	e_err("Reset adapter\n");
 	e1000e_reinit_locked(adapter);
 }
 
@@ -4475,13 +4908,15 @@ out:
 	return retval;
 }
 
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+			    bool runtime)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, ctrl_ext, rctl, status;
-	u32 wufc = adapter->wol;
+	/* Runtime suspend should only enable wakeup for link changes */
+	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
 	int retval = 0;
 
 	netif_device_detach(netdev);
@@ -4651,20 +5086,13 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 	__e1000e_disable_aspm(pdev, state);
 }
 
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_OPS
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
 {
-	int retval;
-	bool wake;
-
-	retval = __e1000_shutdown(pdev, &wake);
-	if (!retval)
-		e1000_complete_shutdown(pdev, true, wake);
-
-	return retval;
+	return !!adapter->tx_ring->buffer_info;
 }
 
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4677,18 +5105,6 @@ static int e1000_resume(struct pci_dev *pdev)
 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
 		e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
 
-	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev,
-			"Cannot enable PCI device from suspend\n");
-		return err;
-	}
-
-	pci_set_master(pdev);
-
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
-
 	e1000e_set_interrupt_capability(adapter);
 	if (netif_running(netdev)) {
 		err = e1000_request_irq(adapter);
@@ -4729,7 +5145,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 
 	e1000e_reset(adapter);
 
-	e1000_init_manageability(adapter);
+	e1000_init_manageability_pt(adapter);
 
 	if (netif_running(netdev))
 		e1000e_up(adapter);
@@ -4746,13 +5162,88 @@ static int __e1000_resume(struct pci_dev *pdev)
 
 	return 0;
 }
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int retval;
+	bool wake;
+
+	retval = __e1000_shutdown(pdev, &wake, false);
+	if (!retval)
+		e1000_complete_shutdown(pdev, true, wake);
+
+	return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (e1000e_pm_ready(adapter))
+		adapter->idle_check = true;
+
+	return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (e1000e_pm_ready(adapter)) {
+		bool wake;
+
+		__e1000_shutdown(pdev, &wake, true);
+	}
+
+	return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (!e1000e_pm_ready(adapter))
+		return 0;
+
+	if (adapter->idle_check) {
+		adapter->idle_check = false;
+		if (!e1000e_has_link(adapter))
+			pm_schedule_suspend(dev, MSEC_PER_SEC);
+	}
+
+	return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (!e1000e_pm_ready(adapter))
+		return 0;
+
+	adapter->idle_check = !dev->power.runtime_auto;
+	return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
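The runtime callbacks implement a check-once policy: every resume arms idle_check, the idle callback consumes it and, if there is no link, schedules a suspend a second out; idle always returns -EBUSY so the PM core never powers the device down synchronously from the idle path. The idle half, condensed (demo_* names hypothetical):

	static int demo_runtime_idle(struct device *dev)
	{
		struct demo_adapter *adapter = demo_from_dev(dev); /* hypothetical */

		if (adapter->idle_check) {
			adapter->idle_check = false;
			/* no link: suspend later via the deferred path,
			 * which traffic can still cancel with pm_runtime_resume() */
			if (!demo_has_link(adapter))
				pm_schedule_suspend(dev, MSEC_PER_SEC);
		}

		return -EBUSY;	/* never suspend synchronously from idle */
	}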
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
 	bool wake = false;
 
-	__e1000_shutdown(pdev, &wake);
+	__e1000_shutdown(pdev, &wake, false);
 
 	if (system_state == SYSTEM_POWER_OFF)
 		e1000_complete_shutdown(pdev, false, wake);
@@ -4826,8 +5317,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_set_master(pdev);
+		pdev->state_saved = true;
 		pci_restore_state(pdev);
-		pci_save_state(pdev);
 
 		pci_enable_wake(pdev, PCI_D3hot, 0);
 		pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4855,7 +5346,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	e1000_init_manageability(adapter);
+	e1000_init_manageability_pt(adapter);
 
 	if (netif_running(netdev)) {
 		if (e1000e_up(adapter)) {
@@ -4968,16 +5459,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		return err;
 
 	pci_using_dac = 0;
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (!err)
 			pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
-							  DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");
@@ -5008,6 +5499,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
+	netdev->irq = pdev->irq;
+
 	pci_set_drvdata(pdev, netdev);
 	adapter = netdev_priv(netdev);
 	hw = &adapter->hw;
@@ -5228,6 +5721,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	e1000_print_device_info(adapter);
 
+	if (pci_dev_run_wake(pdev)) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
+	pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
 	return 0;
 
 err_register:
@@ -5270,12 +5769,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+	pm_runtime_get_sync(&pdev->dev);
 
 	/*
 	 * flush_scheduled work may reschedule our watchdog task, so
 	 * explicitly disable watchdog tasks from being rescheduled
 	 */
-	set_bit(__E1000_DOWN, &adapter->state);
+	if (!down)
+		set_bit(__E1000_DOWN, &adapter->state);
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
@@ -5289,8 +5792,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	if (!(netdev->flags & IFF_UP))
 		e1000_power_down_phy(adapter);
 
+	/* Don't lie to e1000_close() down the road. */
+	if (!down)
+		clear_bit(__E1000_DOWN, &adapter->state);
 	unregister_netdev(netdev);
 
+	if (pci_dev_run_wake(pdev)) {
+		pm_runtime_disable(&pdev->dev);
+		pm_runtime_set_suspended(&pdev->dev);
+	}
+	pm_runtime_put_noidle(&pdev->dev);
+
 	/*
 	 * Release control of h/w to f/w. If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
 	e1000_release_hw_control(adapter);
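Probe and remove now pair up around runtime PM: probe marks the device active and enables runtime PM only when pci_dev_run_wake() says the device can generate wakeups while suspended, and remove takes a synchronous reference for the duration of teardown, then unwinds with disable/set_suspended/put_noidle. The pairing, reduced to its skeleton (kernel-context fragment, details elided):

	/* probe tail */
	if (pci_dev_run_wake(pdev)) {
		pm_runtime_set_active(&pdev->dev);	/* hw is up after probe */
		pm_runtime_enable(&pdev->dev);
	}

	/* remove: mirror image, holding a reference across teardown */
	pm_runtime_get_sync(&pdev->dev);
	/* ... stop timers and work items, unregister_netdev() ... */
	if (pci_dev_run_wake(pdev)) {
		pm_runtime_disable(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
	}
	pm_runtime_put_noidle(&pdev->dev);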
@@ -5380,6 +5892,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
 
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
@@ -5390,16 +5903,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
+#ifdef CONFIG_PM_OPS
+static const struct dev_pm_ops e1000_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+			   e1000_runtime_resume, e1000_idle)
+};
+#endif
+
 /* PCI Device API Driver */
 static struct pci_driver e1000_driver = {
 	.name     = e1000e_driver_name,
 	.id_table = e1000_pci_tbl,
 	.probe    = e1000_probe,
 	.remove   = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
-	/* Power Management Hooks */
-	.suspend  = e1000_suspend,
-	.resume   = e1000_resume,
+#ifdef CONFIG_PM_OPS
+	.driver.pm = &e1000_pm_ops,
 #endif
 	.shutdown = e1000_shutdown,
 	.err_handler = &e1000_err_handler
@@ -5414,10 +5933,9 @@ static struct pci_driver e1000_driver = {
 static int __init e1000_init_module(void)
 {
 	int ret;
-	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
-	       e1000e_driver_name, e1000e_driver_version);
-	printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
-	       e1000e_driver_name);
+	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+		e1000e_driver_version);
+	pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
 	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
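The pr_info() cleanup in this last hunk works because of the pr_fmt() define added at the top of the file: pr_info(fmt, ...) expands to printk(KERN_INFO pr_fmt(fmt), ...), so every message is automatically prefixed with the module name and the explicit "%s: "/e1000e_driver_name arguments become redundant. A minimal module showing the pair (hypothetical "demo" module, not driver code):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

	#include <linux/module.h>
	#include <linux/kernel.h>

	static int __init demo_init(void)
	{
		pr_info("loaded\n");	/* logs "demo: loaded" */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		pr_info("unloaded\n");
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");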