Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  1938
1 file changed, 1107 insertions, 831 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a417be7f8be..ca17af4349d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2007 Intel Corporation.
+ Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
the file called "COPYING".
Contact Information:
- Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -46,15 +45,14 @@
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
- "Intel(R) 10 Gigabit PCI Express Network Driver";
+ "Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "1.3.18-k4"
+#define DRV_VERSION "1.3.30-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
-static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2007 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
- [board_82598] = &ixgbe_82598_info,
+ [board_82598] = &ixgbe_82598_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -74,15 +72,17 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
+ board_82598 },
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
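The new XF LR entry joins the ID table that MODULE_DEVICE_TABLE(pci, ...) exports for hotplug matching. As a sketch of what one row carries, PCI_VDEVICE() (per the <linux/pci.h> definition of this era, an assumption) wildcards the subsystem IDs, so the added entry expands to roughly:

/* Sketch only: approximate expansion of the PCI_VDEVICE() row added
 * above; the trailing field is driver_data, the ixgbe_info_tbl index.
 * Kernel context and the driver's ixgbe.h are assumed. */
static struct pci_device_id example_tbl[] = {
	{ PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_82598 },
	{ 0, }	/* required last entry */
};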
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
- void *p);
+ void *p);
static struct notifier_block dca_notifier = {
.notifier_call = ixgbe_notify_dca,
.next = NULL,
@@ -104,7 +104,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
/* Let firmware take over control of h/w */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -114,24 +114,11 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
/* Let firmware know the driver has taken over */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
-}
-
-#ifdef DEBUG
-/**
- * ixgbe_get_hw_dev_name - return device name string
- * used by hardware layer to print debugging information
- **/
-char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
-{
- struct ixgbe_adapter *adapter = hw->back;
- struct net_device *netdev = adapter->netdev;
- return netdev->name;
+ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
-#endif
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
- u8 msix_vector)
+ u8 msix_vector)
{
u32 ivar, index;
@@ -144,13 +131,12 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
}
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
- struct ixgbe_tx_buffer
- *tx_buffer_info)
+ struct ixgbe_tx_buffer
+ *tx_buffer_info)
{
if (tx_buffer_info->dma) {
- pci_unmap_page(adapter->pdev,
- tx_buffer_info->dma,
- tx_buffer_info->length, PCI_DMA_TODEVICE);
+ pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
+ tx_buffer_info->length, PCI_DMA_TODEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -161,107 +147,120 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
}
static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- unsigned int eop,
- union ixgbe_adv_tx_desc *eop_desc)
+ struct ixgbe_ring *tx_ring,
+ unsigned int eop)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 head, tail;
+
/* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
+ * check with the clearing of time_stamp and movement of eop */
+ head = IXGBE_READ_REG(hw, tx_ring->head);
+ tail = IXGBE_READ_REG(hw, tx_ring->tail);
adapter->detect_tx_hung = false;
- if (tx_ring->tx_buffer_info[eop].dma &&
+ if ((head != tail) &&
+ tx_ring->tx_buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
!(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
/* detected Tx unit hang */
+ union ixgbe_adv_tx_desc *tx_desc;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
" next_to_use <%x>\n"
" next_to_clean <%x>\n"
"tx_buffer_info[next_to_clean]\n"
" time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
- readl(adapter->hw.hw_addr + tx_ring->head),
- readl(adapter->hw.hw_addr + tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->tx_buffer_info[eop].time_stamp,
- eop, jiffies, eop_desc->wb.status);
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ head, tail,
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
return true;
}
return false;
}
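The reworked hang check requires three conditions at once: outstanding work in the ring (head != tail), an oldest pending buffer whose time_stamp is more than a second stale, and a transmit unit that is not merely paused by flow control (TFCS_TXOFF clear). A minimal sketch of the wraparound-safe staleness test, kernel context assumed; tx_buffer_is_stale() is an illustrative helper, not part of the driver:

#include <linux/jiffies.h>
#include <linux/types.h>

/* time_after() is wraparound-safe, so a time_stamp recorded more than
 * HZ jiffies (one second) ago counts as stale. */
static bool tx_buffer_is_stale(unsigned long time_stamp)
{
	return time_stamp && time_after(jiffies, time_stamp + HZ);
}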
-#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
(((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+
+#define GET_TX_HEAD_FROM_RING(ring) (\
+ *(volatile u32 *) \
+ ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
+static void ixgbe_tx_timeout(struct net_device *netdev);
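TXD_USE_COUNT() is a ceiling division by the 16 KB per-descriptor data limit, and GET_TX_HEAD_FROM_RING() reads the head index the hardware writes back to the word just past the last descriptor. A standalone sketch of the descriptor arithmetic, assuming 4 KiB pages and MAX_SKB_FRAGS == 18 (both configuration-dependent):

#include <stdio.h>

#define IXGBE_MAX_TXD_PWR      14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)	/* 16384 */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
	(((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
	/* 9000 bytes fit in one descriptor, 40000 bytes need three */
	printf("%d %d\n", TXD_USE_COUNT(9000), TXD_USE_COUNT(40000));
	/* worst case per skb: 1 head + 18 page frags + 1 context = 20 */
	printf("%d\n", TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) +
		       18 * TXD_USE_COUNT(4096) + 1);
	return 0;
}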
/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
+ * @tx_ring: tx ring to clean
**/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
- struct net_device *netdev = adapter->netdev;
- union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+ union ixgbe_adv_tx_desc *tx_desc;
struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned int i, eop;
- bool cleaned = false;
- unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 head, oldhead;
+ unsigned int count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ rmb();
+ head = GET_TX_HEAD_FROM_RING(tx_ring);
+ head = le32_to_cpu(head);
i = tx_ring->next_to_clean;
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
- while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
- cleaned = false;
- while (!cleaned) {
+ while (1) {
+ while (i != head) {
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- cleaned = (i == eop);
+ skb = tx_buffer_info->skb;
- tx_ring->stats.bytes += tx_buffer_info->length;
- if (cleaned) {
- struct sk_buff *skb = tx_buffer_info->skb;
+ if (skb) {
unsigned int segs, bytecount;
+
+ /* gso_segs is currently only valid for tcp */
segs = skb_shinfo(skb)->gso_segs ?: 1;
/* multiply data chunks by size of headers */
bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_tx_packets += segs;
- total_tx_bytes += bytecount;
+ skb->len;
+ total_packets += segs;
+ total_bytes += bytecount;
}
+
ixgbe_unmap_and_free_tx_resource(adapter,
- tx_buffer_info);
- tx_desc->wb.status = 0;
+ tx_buffer_info);
i++;
if (i == tx_ring->count)
i = 0;
- }
-
- tx_ring->stats.packets++;
-
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-
- /* weight of a sort for tx, avoid endless transmit cleanup */
- if (total_tx_packets >= tx_ring->work_limit)
- break;
- }
+ count++;
+ if (count == tx_ring->count)
+ goto done_cleaning;
+ }
+ oldhead = head;
+ rmb();
+ head = GET_TX_HEAD_FROM_RING(tx_ring);
+ head = le32_to_cpu(head);
+ if (head == oldhead)
+ goto done_cleaning;
+ } /* while (1) */
+
+done_cleaning:
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (total_tx_packets && netif_carrier_ok(netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ if (unlikely(count && netif_carrier_ok(netdev) &&
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -269,59 +268,68 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
netif_wake_subqueue(netdev, tx_ring->queue_index);
- adapter->restart_queue++;
+ ++adapter->restart_queue;
}
}
- if (adapter->detect_tx_hung)
- if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
- netif_stop_subqueue(netdev, tx_ring->queue_index);
-
- if (total_tx_packets >= tx_ring->work_limit)
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
+ if (adapter->detect_tx_hung) {
+ if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+ /* schedule immediate reset if we believe we hung */
+ DPRINTK(PROBE, INFO,
+ "tx hang %d detected, resetting adapter\n",
+ adapter->tx_timeout_count + 1);
+ ixgbe_tx_timeout(adapter->netdev);
+ }
+ }
- tx_ring->total_bytes += total_tx_bytes;
- tx_ring->total_packets += total_tx_packets;
- adapter->net_stats.tx_bytes += total_tx_bytes;
- adapter->net_stats.tx_packets += total_tx_packets;
- cleaned = total_tx_packets ? true : false;
- return cleaned;
+ /* re-arm the interrupt */
+ if ((total_packets >= tx_ring->work_limit) ||
+ (count == tx_ring->count))
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
+
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ adapter->net_stats.tx_bytes += total_bytes;
+ adapter->net_stats.tx_packets += total_packets;
+ return (total_packets ? true : false);
}
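The rewritten clean routine no longer tests DD bits in each descriptor; it polls the head value the hardware DMAs into host memory, and re-reads it after each pass so a head that advanced mid-clean is not missed. A minimal sketch of that read, using the macro defined above; ixgbe_read_tx_head() is an illustrative wrapper, kernel context assumed:

static u32 ixgbe_read_tx_head(struct ixgbe_ring *tx_ring)
{
	u32 head;

	rmb();	/* order the head read against later descriptor reads */
	head = GET_TX_HEAD_FROM_RING(tx_ring);
	return le32_to_cpu(head);
}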
-#ifdef CONFIG_DCA
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rxr)
+ struct ixgbe_ring *rx_ring)
{
u32 rxctrl;
int cpu = get_cpu();
- int q = rxr - adapter->rx_ring;
+ int q = rx_ring - adapter->rx_ring;
- if (rxr->cpu != cpu) {
+ if (rx_ring->cpu != cpu) {
rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
- rxctrl |= dca_get_tag(cpu);
+ rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
- rxr->cpu = cpu;
+ rx_ring->cpu = cpu;
}
put_cpu();
}
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *txr)
+ struct ixgbe_ring *tx_ring)
{
u32 txctrl;
int cpu = get_cpu();
- int q = txr - adapter->tx_ring;
+ int q = tx_ring - adapter->tx_ring;
- if (txr->cpu != cpu) {
+ if (tx_ring->cpu != cpu) {
txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
- txctrl |= dca_get_tag(cpu);
+ txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
- txr->cpu = cpu;
+ tx_ring->cpu = cpu;
}
put_cpu();
}
@@ -351,11 +359,14 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
switch (event) {
case DCA_PROVIDER_ADD:
- adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+ /* if we're already enabled, don't do it again */
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ break;
/* Always use CB2 mode, difference is masked
* in the CB driver. */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
if (dca_add_requester(dev) == 0) {
+ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
ixgbe_setup_dca(adapter);
break;
}
@@ -372,7 +383,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
return 0;
}
-#endif /* CONFIG_DCA */
+#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
/**
* ixgbe_receive_skb - Send a completed packet up the stack
* @adapter: board private structure
@@ -382,8 +393,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
* @rx_desc: rx descriptor
**/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
- struct sk_buff *skb, u8 status,
- struct ixgbe_ring *ring,
+ struct sk_buff *skb, u8 status,
+ struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
@@ -420,14 +431,12 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
* @skb: skb currently being received and modified
**/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
- u32 status_err,
- struct sk_buff *skb)
+ u32 status_err, struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
- /* Ignore Checksum bit is set, or rx csum disabled */
- if ((status_err & IXGBE_RXD_STAT_IXSM) ||
- !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ /* Rx csum disabled */
+ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
return;
/* if IP and error */
@@ -455,37 +464,44 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
* @adapter: address of board private structure
**/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cleaned_count)
+ struct ixgbe_ring *rx_ring,
+ int cleaned_count)
{
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
- struct ixgbe_rx_buffer *rx_buffer_info;
- struct sk_buff *skb;
+ struct ixgbe_rx_buffer *bi;
unsigned int i;
- unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;
+ unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
i = rx_ring->next_to_use;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
- if (!rx_buffer_info->page &&
- (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
- rx_buffer_info->page = alloc_page(GFP_ATOMIC);
- if (!rx_buffer_info->page) {
- adapter->alloc_rx_page_failed++;
- goto no_buffers;
+ if (!bi->page_dma &&
+ (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ adapter->alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ bi->page_offset = 0;
+ } else {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= (PAGE_SIZE / 2);
}
- rx_buffer_info->page_dma =
- pci_map_page(pdev, rx_buffer_info->page,
- 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+ bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_offset,
+ (PAGE_SIZE / 2),
+ PCI_DMA_FROMDEVICE);
}
- if (!rx_buffer_info->skb) {
- skb = netdev_alloc_skb(netdev, bufsz);
+ if (!bi->skb) {
+ struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
+ bufsz);
if (!skb) {
adapter->alloc_rx_buff_failed++;
@@ -499,28 +515,25 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
*/
skb_reserve(skb, NET_IP_ALIGN);
- rx_buffer_info->skb = skb;
- rx_buffer_info->dma = pci_map_single(pdev, skb->data,
- bufsz,
- PCI_DMA_FROMDEVICE);
+ bi->skb = skb;
+ bi->dma = pci_map_single(pdev, skb->data, bufsz,
+ PCI_DMA_FROMDEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_desc->read.pkt_addr =
- cpu_to_le64(rx_buffer_info->page_dma);
- rx_desc->read.hdr_addr =
- cpu_to_le64(rx_buffer_info->dma);
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
- rx_desc->read.pkt_addr =
- cpu_to_le64(rx_buffer_info->dma);
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
}
i++;
if (i == rx_ring->count)
i = 0;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ bi = &rx_ring->rx_buffer_info[i];
}
+
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
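With packet split enabled, the allocator above now maps only half a page per buffer and flips page_offset with an XOR so the next buffer uses the other half before the page is retired. A standalone sketch of the offset toggle (PAGE_SIZE hard-coded to 4096 purely for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096	/* illustrative; the kernel supplies the real value */

int main(void)
{
	unsigned int page_offset = 0;

	page_offset ^= (PAGE_SIZE / 2);	/* 0 -> 2048: second half */
	printf("%u\n", page_offset);
	page_offset ^= (PAGE_SIZE / 2);	/* 2048 -> 0: first half again */
	printf("%u\n", page_offset);
	return 0;
}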
@@ -538,46 +551,54 @@ no_buffers:
}
}
+static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int *work_done, int work_to_do)
+ struct ixgbe_ring *rx_ring,
+ int *work_done, int work_to_do)
{
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
unsigned int i;
- u32 upper_len, len, staterr;
+ u32 len, staterr;
u16 hdr_info;
bool cleaned = false;
int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
- upper_len = 0;
rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
+ u32 upper_len = 0;
if (*work_done >= work_to_do)
break;
(*work_done)++;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- hdr_info =
- le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
- len =
- ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT);
+ hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
+ len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
if (hdr_info & IXGBE_RXDADV_SPH)
adapter->rx_hdr_split++;
if (len > IXGBE_RX_HDR_SIZE)
len = IXGBE_RX_HDR_SIZE;
upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- } else
+ } else {
len = le16_to_cpu(rx_desc->wb.upper.length);
+ }
cleaned = true;
skb = rx_buffer_info->skb;
@@ -586,18 +607,25 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
if (len && !skb_shinfo(skb)->nr_frags) {
pci_unmap_single(pdev, rx_buffer_info->dma,
- adapter->rx_buf_len + NET_IP_ALIGN,
- PCI_DMA_FROMDEVICE);
+ rx_ring->rx_buf_len + NET_IP_ALIGN,
+ PCI_DMA_FROMDEVICE);
skb_put(skb, len);
}
if (upper_len) {
pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page, 0, upper_len);
- rx_buffer_info->page = NULL;
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
+
+ if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+ (page_count(rx_buffer_info->page) != 1))
+ rx_buffer_info->page = NULL;
+ else
+ get_page(rx_buffer_info->page);
skb->len += upper_len;
skb->data_len += upper_len;
@@ -620,6 +648,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
+ next_buffer->dma = 0;
adapter->non_eop_descs++;
goto next_desc;
}
@@ -635,9 +664,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
total_rx_bytes += skb->len;
total_rx_packets++;
- skb->protocol = eth_type_trans(skb, netdev);
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
- netdev->last_rx = jiffies;
+ adapter->netdev->last_rx = jiffies;
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -666,9 +695,6 @@ next_desc:
if (cleaned_count)
ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
- adapter->net_stats.rx_bytes += total_rx_bytes;
- adapter->net_stats.rx_packets += total_rx_packets;
-
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
adapter->net_stats.rx_bytes += total_rx_bytes;
@@ -700,43 +726,43 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
q_vector = &adapter->q_vector[v_idx];
/* XXX for_each_bit(...) */
r_idx = find_first_bit(q_vector->rxr_idx,
- adapter->num_rx_queues);
+ adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
j = adapter->rx_ring[r_idx].reg_idx;
ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
- adapter->num_rx_queues,
- r_idx + 1);
+ adapter->num_rx_queues,
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->txr_idx,
- adapter->num_tx_queues);
+ adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
j = adapter->tx_ring[r_idx].reg_idx;
ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
- adapter->num_tx_queues,
- r_idx + 1);
+ adapter->num_tx_queues,
+ r_idx + 1);
}
- /* if this is a tx only vector use half the irq (tx) rate */
+ /* if this is a tx only vector halve the interrupt rate */
if (q_vector->txr_count && !q_vector->rxr_count)
- q_vector->eitr = adapter->tx_eitr;
+ q_vector->eitr = (adapter->eitr_param >> 1);
else
- /* rx only or mixed */
- q_vector->eitr = adapter->rx_eitr;
+ /* rx only */
+ q_vector->eitr = adapter->eitr_param;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
- EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+ EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
}
ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
- /* set up to autoclear timer, lsc, and the vectors */
+ /* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~IXGBE_EIMS_OTHER;
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -766,8 +792,8 @@ enum latency_range {
* parameter (see ixgbe_param.c)
**/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
- u32 eitr, u8 itr_setting,
- int packets, int bytes)
+ u32 eitr, u8 itr_setting,
+ int packets, int bytes)
{
unsigned int retval = itr_setting;
u32 timepassed_us;
@@ -814,40 +840,40 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
u32 new_itr;
u8 current_itr, ret_itr;
int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
- sizeof(struct ixgbe_q_vector);
+ sizeof(struct ixgbe_q_vector);
struct ixgbe_ring *rx_ring, *tx_ring;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
tx_ring = &(adapter->tx_ring[r_idx]);
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->tx_eitr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
/* if the result for this queue would decrease interrupt
* rate for this vector then use that result */
- q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
- q_vector->tx_eitr - 1 : ret_itr);
+ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+ q_vector->tx_itr - 1 : ret_itr);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
rx_ring = &(adapter->rx_ring[r_idx]);
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->rx_eitr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
/* if the result for this queue would decrease interrupt
* rate for this vector then use that result */
- q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
- q_vector->rx_eitr - 1 : ret_itr);
+ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+ q_vector->rx_itr - 1 : ret_itr);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
+ r_idx + 1);
}
- current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
+ current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
@@ -871,13 +897,27 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
/* must write high and low 16 bits to reset counter */
DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
- itr_reg);
+ itr_reg);
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
}
return;
}
+
+static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->lsc_int++;
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+ schedule_work(&adapter->watchdog_task);
+ }
+}
+
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
struct net_device *netdev = data;
@@ -885,11 +925,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
- if (eicr & IXGBE_EICR_LSC) {
- adapter->lsc_int++;
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies);
- }
+ if (eicr & IXGBE_EICR_LSC)
+ ixgbe_check_lsc(adapter);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -901,7 +938,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
struct ixgbe_q_vector *q_vector = data;
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *txr;
+ struct ixgbe_ring *tx_ring;
int i, r_idx;
if (!q_vector->txr_count)
@@ -909,16 +946,16 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- txr = &(adapter->tx_ring[r_idx]);
-#ifdef CONFIG_DCA
+ tx_ring = &(adapter->tx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, txr);
+ ixgbe_update_tx_dca(adapter, tx_ring);
#endif
- txr->total_bytes = 0;
- txr->total_packets = 0;
- ixgbe_clean_tx_irq(adapter, txr);
+ tx_ring->total_bytes = 0;
+ tx_ring->total_packets = 0;
+ ixgbe_clean_tx_irq(adapter, tx_ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
return IRQ_HANDLED;
@@ -933,18 +970,26 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
struct ixgbe_q_vector *q_vector = data;
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *rxr;
+ struct ixgbe_ring *rx_ring;
int r_idx;
+ int i;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring->total_bytes = 0;
+ rx_ring->total_packets = 0;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
if (!q_vector->rxr_count)
return IRQ_HANDLED;
- rxr = &(adapter->rx_ring[r_idx]);
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
/* disable interrupts on this vector only */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
- rxr->total_bytes = 0;
- rxr->total_packets = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
netif_rx_schedule(adapter->netdev, &q_vector->napi);
return IRQ_HANDLED;
@@ -963,39 +1008,90 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
*
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
**/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *rxr;
+ struct ixgbe_ring *rx_ring = NULL;
int work_done = 0;
long r_idx;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rxr = &(adapter->rx_ring[r_idx]);
-#ifdef CONFIG_DCA
+ rx_ring = &(adapter->rx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, rxr);
+ ixgbe_update_rx_dca(adapter, rx_ring);
#endif
- ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
+ ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
netif_rx_complete(adapter->netdev, napi);
- if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
+ if (adapter->itr_setting & 3)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
}
return work_done;
}
+/**
+ * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+ struct ixgbe_q_vector *q_vector =
+ container_of(napi, struct ixgbe_q_vector, napi);
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_ring *rx_ring = NULL;
+ int work_done = 0, i;
+ long r_idx;
+ u16 enable_mask = 0;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ budget /= (q_vector->rxr_count ?: 1);
+ budget = max(budget, 1);
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_rx_dca(adapter, rx_ring);
+#endif
+ ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
+ enable_mask |= rx_ring->v_idx;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ netif_rx_complete(adapter->netdev, napi);
+ if (adapter->itr_setting & 3)
+ ixgbe_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+ return 0;
+ }
+
+ return work_done;
+}
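ixgbe_clean_rxonly_many() splits the NAPI budget evenly across every rx queue on the vector and clamps the share at one packet so polling always makes progress. A worked sketch of the split, assuming the common NAPI budget of 64 and three queues (both values illustrative):

#include <stdio.h>

int main(void)
{
	int budget = 64, rxr_count = 3;

	budget /= (rxr_count ? rxr_count : 1);	/* 64 / 3 = 21 per queue */
	if (budget < 1)
		budget = 1;	/* never hand a queue a zero budget */
	printf("%d packets per queue\n", budget);
	return 0;
}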
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
- int r_idx)
+ int r_idx)
{
a->q_vector[v_idx].adapter = a;
set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
@@ -1004,7 +1100,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
- int r_idx)
+ int r_idx)
{
a->q_vector[v_idx].adapter = a;
set_bit(r_idx, a->q_vector[v_idx].txr_idx);
@@ -1024,7 +1120,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
* mapping configurations in here.
**/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+ int vectors)
{
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
@@ -1101,28 +1197,28 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
goto out;
#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
- (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
- &ixgbe_msix_clean_many)
+ (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+ &ixgbe_msix_clean_many)
for (vector = 0; vector < q_vectors; vector++) {
handler = SET_HANDLER(&adapter->q_vector[vector]);
sprintf(adapter->name[vector], "%s:v%d-%s",
- netdev->name, vector,