Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf')
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h       |   28
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c       |  172
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h       |  114
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  | 1386
4 files changed, 698 insertions(+), 1002 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index e09a6cc633b..418af827b23 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -251,6 +251,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
@@ -264,32 +265,9 @@ struct ixgbe_adv_tx_context_desc {
/* Interrupt register bitmasks */
-/* Extended Interrupt Cause Read */
-#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
-#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
-
-/* Extended Interrupt Cause Set */
-#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Set */
-#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Clear */
-#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_MAILBOX | \
- IXGBE_EIMS_OTHER)
-
#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
/* Error Codes */
#define IXGBE_ERR_INVALID_MAC_ADDR -1
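Note on the EITR encoding introduced above: the driver keeps the throttle interval pre-shifted left by 2 because bits 0-2 of VTEITR are reserved, masks it with IXGBE_MAX_EITR, and sets the WDIS bit on writes so the countdown timer is not cleared (see ixgbevf_write_eitr() later in this patch). A minimal sketch, with a hypothetical helper name:

static u32 eitr_reg_from_usecs(u32 usecs)
{
	u32 itr = usecs << 2;		/* e.g. 50us -> 200 == IXGBE_20K_ITR */

	if (itr < IXGBE_MIN_EITR)	/* hardware floor is 2us (8 >> 2) */
		itr = IXGBE_MIN_EITR;
	itr &= IXGBE_MAX_EITR;		/* keep only the valid interval bits */

	return itr | IXGBE_EITR_CNT_WDIS; /* don't reset the timer on write */
}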
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e8dddf572d3..8f2070439b5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -43,7 +43,6 @@
#define IXGBE_ALL_RAR_ENTRIES 16
-#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -75,21 +74,17 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
zero_base)},
{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
zero_base)},
- {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
};
#define IXGBE_QUEUE_STATS_LEN 0
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
-#endif /* ETHTOOL_GSTATS */
-#ifdef ETHTOOL_TEST
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
-#endif /* ETHTOOL_TEST */
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@@ -289,13 +284,11 @@ static void ixgbevf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbevf_ring *tx_ring = adapter->tx_ring;
- struct ixgbevf_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = IXGBEVF_MAX_RXD;
ring->tx_max_pending = IXGBEVF_MAX_TXD;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
@@ -303,33 +296,28 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
- int i, err = 0;
u32 new_rx_count, new_tx_count;
+ int i, err = 0;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
+ new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
- /* nothing to do */
+ new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
+ new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count))
return 0;
- }
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
- /*
- * If the adapter isn't up and running then just set the
- * new parameters and scurry for the exits.
- */
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i].count = new_tx_count;
@@ -340,82 +328,98 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
goto clear_reset;
}
- tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!tx_ring) {
- err = -ENOMEM;
- goto clear_reset;
- }
-
- rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring) {
- err = -ENOMEM;
- goto err_rx_setup;
- }
-
- ixgbevf_down(adapter);
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
- memcpy(tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
- if (err) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ /* clone ring and setup updated count */
+ tx_ring[i] = adapter->tx_ring[i];
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
+ if (!err)
+ continue;
while (i) {
i--;
- ixgbevf_free_tx_resources(adapter,
- &tx_ring[i]);
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
}
- goto err_tx_ring_setup;
+
+ vfree(tx_ring);
+ tx_ring = NULL;
+
+ goto clear_reset;
}
- tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
}
- memcpy(rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ /* clone ring and setup updated count */
+ rx_ring[i] = adapter->rx_ring[i];
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (!err)
+ continue;
while (i) {
i--;
- ixgbevf_free_rx_resources(adapter,
- &rx_ring[i]);
+ ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
}
- goto err_rx_ring_setup;
+
+ vfree(rx_ring);
+ rx_ring = NULL;
+
+ goto clear_reset;
}
- rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
}
- /*
- * Only switch to new rings if all the prior allocations
- * and ring setups have succeeded.
- */
- kfree(adapter->tx_ring);
- adapter->tx_ring = tx_ring;
- adapter->tx_ring_count = new_tx_count;
-
- kfree(adapter->rx_ring);
- adapter->rx_ring = rx_ring;
- adapter->rx_ring_count = new_rx_count;
+ /* bring interface down to prepare for update */
+ ixgbevf_down(adapter);
- /* success! */
- ixgbevf_up(adapter);
+ /* Tx */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+ adapter->tx_ring[i] = tx_ring[i];
+ }
+ adapter->tx_ring_count = new_tx_count;
- goto clear_reset;
+ vfree(tx_ring);
+ tx_ring = NULL;
+ }
-err_rx_ring_setup:
- for(i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ /* Rx */
+ if (rx_ring) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+ adapter->rx_ring[i] = rx_ring[i];
+ }
+ adapter->rx_ring_count = new_rx_count;
-err_tx_ring_setup:
- kfree(rx_ring);
+ vfree(rx_ring);
+ rx_ring = NULL;
+ }
-err_rx_setup:
- kfree(tx_ring);
+ /* restore interface using new values */
+ ixgbevf_up(adapter);
clear_reset:
+ /* free Tx resources if Rx error is encountered */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ vfree(tx_ring);
+ }
+
clear_bit(__IXGBEVF_RESETTING, &adapter->state);
return err;
}
@@ -674,10 +678,8 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev)) {
- if (!adapter->dev_closed)
- ixgbevf_reinit_locked(adapter);
- }
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
return 0;
}
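The reworked ixgbevf_set_ringparam() above follows a build-then-swap pattern: new descriptor rings are fully allocated before the interface is taken down, so an allocation failure leaves the live rings untouched. A condensed sketch of the Tx half, with a hypothetical resize_tx_rings() standing in for the real function:

static int resize_tx_rings(struct ixgbevf_adapter *adapter, u32 new_count)
{
	struct ixgbevf_ring *tmp;
	int i, err;

	tmp = vmalloc(adapter->num_tx_queues * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		/* clone the live ring, then resize only the clone */
		tmp[i] = adapter->tx_ring[i];
		tmp[i].count = new_count;
		err = ixgbevf_setup_tx_resources(adapter, &tmp[i]);
		if (err)
			goto unwind;
	}

	/* swap only after every allocation has succeeded */
	ixgbevf_down(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++) {
		ixgbevf_free_tx_resources(adapter, &adapter->tx_ring[i]);
		adapter->tx_ring[i] = tmp[i];
	}
	adapter->tx_ring_count = new_count;
	ixgbevf_up(adapter);
	vfree(tmp);
	return 0;

unwind:
	while (i--)
		ixgbevf_free_tx_resources(adapter, &tmp[i]);
	vfree(tmp);
	return err;
}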
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 0a1b99240d4..98cadb0c4da 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -52,12 +52,12 @@ struct ixgbevf_tx_buffer {
struct ixgbevf_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
- dma_addr_t page_dma;
- unsigned int page_offset;
};
struct ixgbevf_ring {
+ struct ixgbevf_ring *next;
+ struct net_device *netdev;
+ struct device *dev;
struct ixgbevf_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -83,29 +83,9 @@ struct ixgbevf_ring {
* offset associated with this ring, which is different
* for DCB and RSS modes */
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
- /* cpu for tx queue */
- int cpu;
-#endif
-
- u64 v_idx; /* maps directly to the index for this ring in the hardware
- * vector array, can also be used for finding the bit in EICR
- * and friends that represents the vector for this ring */
-
- u16 work_limit; /* max work per interrupt */
u16 rx_buf_len;
};
-enum ixgbevf_ring_f_enum {
- RING_F_NONE = 0,
- RING_F_ARRAY_SIZE /* must be last in enum set */
-};
-
-struct ixgbevf_ring_feature {
- int indices;
- int mask;
-};
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -120,8 +100,6 @@ struct ixgbevf_ring_feature {
#define IXGBEVF_MIN_RXD 64
/* Supported Rx Buffer Sizes */
-#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
@@ -140,22 +118,42 @@ struct ixgbevf_ring_feature {
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+struct ixgbevf_ring_container {
+ struct ixgbevf_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+/* iterator for handling rings in ring container */
+#define ixgbevf_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbevf_q_vector {
struct ixgbevf_adapter *adapter;
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring */
+ u16 itr; /* Interrupt throttle rate written to EITR */
struct napi_struct napi;
- DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
- DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
- u8 rxr_count; /* Rx ring count assigned to this vector */
- u8 txr_count; /* Tx ring count assigned to this vector */
- u8 tx_itr;
- u8 rx_itr;
- u32 eitr;
- int v_idx; /* vector index in list */
+ struct ixgbevf_ring_container rx, tx;
+ char name[IFNAMSIZ + 9];
};
+/*
+ * microsecond values for various ITR rates shifted by 2 to fit itr register
+ * with the first 3 bits reserved 0
+ */
+#define IXGBE_MIN_RSC_ITR 24
+#define IXGBE_100K_ITR 40
+#define IXGBE_20K_ITR 200
+#define IXGBE_10K_ITR 400
+#define IXGBE_8K_ITR 500
+
/* Helper macros to switch between ints/sec and what the register uses.
* And yes, it's the same math going both ways. The lowest value
* supported by all of the ixgbe hardware is 8.
@@ -168,12 +166,12 @@ struct ixgbevf_q_vector {
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
-#define IXGBE_RX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
-#define IXGBE_TX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
-#define IXGBE_TX_CTXTDESC_ADV(R, i) \
- (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+#define IXGBEVF_RX_DESC(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_DESC(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_CTXTDESC(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
@@ -181,9 +179,8 @@ struct ixgbevf_q_vector {
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_Q_VECTORS 2
-#define MAX_MSIX_COUNT 2
-#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
/* board specific private data structure */
@@ -193,12 +190,14 @@ struct ixgbevf_adapter {
u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
/* Interrupt Throttle Rate */
- u32 itr_setting;
- u16 eitr_low;
- u16 eitr_high;
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
+ /* interrupt masks */
+ u32 eims_enable_mask;
+ u32 eims_other;
/* TX */
struct ixgbevf_ring *tx_ring; /* One per active queue */
@@ -213,18 +212,13 @@ struct ixgbevf_adapter {
/* RX */
struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
- int num_rx_pools; /* == num_rx_queues in 82598 */
- int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
- struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -232,15 +226,8 @@ struct ixgbevf_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -254,18 +241,16 @@ struct ixgbevf_adapter {
u32 eitr_param;
unsigned long state;
- u32 *config_space;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
u32 link_speed;
bool link_up;
- unsigned long link_check_timeout;
struct work_struct watchdog_task;
- bool netdev_registered;
- bool dev_closed;
+
+ spinlock_t mbx_lock;
};
enum ixbgevf_state_t {
@@ -301,11 +286,8 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-
-#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);
-#endif
extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
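The ixgbevf_ring_container introduced above replaces the old per-vector ring bitmaps with a singly linked list: rings are pushed onto the head of q_vector->rx.ring or q_vector->tx.ring (mirroring map_vector_to_rxq()/map_vector_to_txq() in ixgbevf_main.c below), and ixgbevf_for_each_ring() walks the ->next chain. A minimal illustration, with hypothetical helper names:

static void push_ring(struct ixgbevf_ring_container *rc,
		      struct ixgbevf_ring *ring)
{
	ring->next = rc->ring;	/* push onto the head of the list */
	rc->ring = ring;
	rc->count++;
}

static u64 sum_rx_bytes(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_ring *ring;
	u64 bytes = 0;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		bytes += ring->total_bytes;

	return bytes;
}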
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index f69ec4288b1..3f9841d619a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -42,6 +42,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
@@ -97,9 +98,7 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
- u32 itr_reg);
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
struct ixgbevf_ring *rx_ring,
@@ -115,7 +114,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
-/*
+/**
* ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -146,18 +145,18 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
}
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer
*tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_page(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
else
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
@@ -175,38 +174,34 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
- (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#ifdef MAX_SKB_FRAGS
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-#else
-#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
-#endif
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
* ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
**/
-static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *tx_ring)
{
- struct net_device *netdev = adapter->netdev;
- struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int i, eop, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return true;
+
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < tx_ring->work_limit)) {
+ (count < tx_ring->count)) {
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
/* eop could change between read and DD-check */
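DESC_NEEDED, redefined in the hunk above, is the worst-case descriptor count a single transmit may consume. A plausible breakdown of the constant (the patch itself does not spell it out):

	MAX_SKB_FRAGS	one descriptor per page fragment
	+ 1		linear skb->data region
	+ 1		context descriptor for offloads
	+ 2		slack so the ring is never driven completely full
	= MAX_SKB_FRAGS + 4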
@@ -214,7 +209,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
goto cont_loop;
for ( ; !cleaned; count++) {
struct sk_buff *skb;
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
cleaned = (i == eop);
skb = tx_buffer_info->skb;
@@ -231,7 +226,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
total_bytes += bytecount;
}
- ixgbevf_unmap_and_free_tx_resource(adapter,
+ ixgbevf_unmap_and_free_tx_resource(tx_ring,
tx_buffer_info);
tx_desc->wb.status = 0;
@@ -243,37 +238,25 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
cont_loop:
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
}
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(netdev) &&
+ if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
(IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
-#ifdef HAVE_TX_MQ
- if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
- !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
- }
-#else
- if (netif_queue_stopped(netdev) &&
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- netif_wake_queue(netdev);
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
++adapter->restart_queue;
}
-#endif
- }
-
- /* re-arm the interrupt */
- if ((count >= tx_ring->work_limit) &&
- (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
- IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
}
u64_stats_update_begin(&tx_ring->syncp);
@@ -281,7 +264,7 @@ cont_loop:
tx_ring->total_packets += total_packets;
u64_stats_update_end(&tx_ring->syncp);
- return count < tx_ring->work_limit;
+ return count < tx_ring->count;
}
/**
@@ -301,13 +284,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- if (is_vlan && test_bit(tag, adapter->active_vlans))
+ if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag);
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
- napi_gro_receive(&q_vector->napi, skb);
- else
- netif_rx(skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -317,12 +297,13 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
* @skb: skb currently being received and modified
**/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring,
u32 status_err, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
/* Rx csum disabled */
- if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
/* if IP and error */
@@ -357,52 +338,21 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
struct sk_buff *skb;
- unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+ unsigned int i = rx_ring->next_to_use;
- i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
-
- if (!bi->page_dma &&
- (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- if (!bi->page) {
- adapter->alloc_rx_page_failed++;
- goto no_buffers;
- }
- bi->page_offset = 0;
- } else {
- /* use a half page if we're re-using */
- bi->page_offset ^= (PAGE_SIZE / 2);
- }
-
- bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
- DMA_FROM_DEVICE);
- }
-
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
skb = bi->skb;
if (!skb) {
- skb = netdev_alloc_skb(adapter->netdev,
- bufsz);
-
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- /*
- * Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
-
bi->skb = skb;
}
if (!bi->dma) {
@@ -410,14 +360,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- } else {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- }
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
i++;
if (i == rx_ring->count)
@@ -428,36 +371,22 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
}
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
- u64 qmask)
+ u32 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
- mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-}
-
-static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
- int *work_done, int work_to_do)
+ int budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct pci_dev *pdev = adapter->pdev;
@@ -466,36 +395,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb;
unsigned int i;
u32 len, staterr;
- u16 hdr_info;
- bool cleaned = false;
int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
- u32 upper_len = 0;
- if (*work_done >= work_to_do)
+ if (!budget)
break;
- (*work_done)++;
+ budget--;
rmb(); /* read descriptor and rx_buffer_info after status DD */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
- len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hdr_info & IXGBE_RXDADV_SPH)
- adapter->rx_hdr_split++;
- if (len > IXGBEVF_RX_HDR_SIZE)
- len = IXGBEVF_RX_HDR_SIZE;
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- } else {
- len = le16_to_cpu(rx_desc->wb.upper.length);
- }
- cleaned = true;
+ len = le16_to_cpu(rx_desc->wb.upper.length);
skb = rx_buffer_info->skb;
prefetch(skb->data - NET_IP_ALIGN);
rx_buffer_info->skb = NULL;
@@ -508,46 +422,19 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
skb_put(skb, len);
}
- if (upper_len) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page,
- rx_buffer_info->page_offset,
- upper_len);
-
- if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
- (page_count(rx_buffer_info->page) != 1))
- rx_buffer_info->page = NULL;
- else
- get_page(rx_buffer_info->page);
-
- skb->len += upper_len;
- skb->data_len += upper_len;
- skb->truesize += upper_len;
- }
-
i++;
if (i == rx_ring->count)
i = 0;
- next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
prefetch(next_rxd);
cleaned_count++;
next_buffer = &rx_ring->rx_buffer_info[i];
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_buffer_info->skb = next_buffer->skb;
- rx_buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- } else {
- skb->next = next_buffer->skb;
- skb->next->prev = skb;
- }
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
adapter->non_eop_descs++;
goto next_desc;
}
@@ -558,7 +445,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_rx_checksum(adapter, staterr, skb);
+ ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -573,7 +460,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
if (header_fixup_len < 14)
skb_push(skb, header_fixup_len);
}
- skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
@@ -605,95 +492,74 @@ next_desc:
rx_ring->total_bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
- return cleaned;
+ return !!budget;
}
/**
- * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * ixgbevf_poll - NAPI polling callback
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
*
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
+ * This function will clean one or more rings associated with a
+ * q_vector.
**/
-static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
struct ixgbevf_q_vector *q_vector =
container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *rx_ring = NULL;
- int work_done = 0;
- long r_idx;
+ struct ixgbevf_ring *ring;
+ int per_ring_budget;
+ bool clean_complete = true;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ ixgbevf_for_each_ring(ring, q_vector->tx)
+ clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
- ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
- }
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget/q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
+
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+ /* all work done, exit the polling mode */
+ napi_complete(napi);
+ if (adapter->rx_itr_setting & 1)
+ ixgbevf_set_itr(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter,
+ 1 << q_vector->v_idx);
- return work_done;
+ return 0;
}
/**
- * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean more than one rx queue associated with a
- * q_vector.
- **/
-static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
- struct ixgbevf_q_vector *q_vector =
- container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *rx_ring = NULL;
- int work_done = 0, i;
- long r_idx;
- u64 enable_mask = 0;
-
- /* attempt to distribute budget to each queue fairly, but don't allow
- * the budget to go below 1 because we'll exit polling */
- budget /= (q_vector->rxr_count ?: 1);
- budget = max(budget, 1);
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
- enable_mask |= rx_ring->v_idx;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
-#ifndef HAVE_NETDEV_NAPI_LIST
- if (!netif_running(adapter->netdev))
- work_done = 0;
-
-#endif
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_enable_queues(adapter, enable_mask);
- }
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
- return work_done;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
-
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -704,56 +570,49 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
struct ixgbevf_q_vector *q_vector;
- struct ixgbe_hw *hw = &adapter->hw;
- int i, j, q_vectors, v_idx, r_idx;
- u32 mask;
+ int q_vectors, v_idx;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ adapter->eims_enable_mask = 0;
/*
* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ struct ixgbevf_ring *ring;
q_vector = adapter->q_vector[v_idx];
- /* XXX for_each_set_bit(...) */
- r_idx = find_first_bit(q_vector->rxr_idx,
- adapter->num_rx_queues);
-
- for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
- ixgbevf_set_ivar(adapter, 0, j, v_idx);
- r_idx = find_next_bit(q_vector->rxr_idx,
- adapter->num_rx_queues,
- r_idx + 1);
- }
- r_idx = find_first_bit(q_vector->txr_idx,
- adapter->num_tx_queues);
-
- for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
- ixgbevf_set_ivar(adapter, 1, j, v_idx);
- r_idx = find_next_bit(q_vector->txr_idx,
- adapter->num_tx_queues,
- r_idx + 1);
+
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+ ixgbevf_for_each_ring(ring, q_vector->tx)
+ ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+ if (q_vector->tx.ring && !q_vector->rx.ring) {
+ /* tx only vector */
+ if (adapter->tx_itr_setting == 1)
+ q_vector->itr = IXGBE_10K_ITR;
+ else
+ q_vector->itr = adapter->tx_itr_setting;
+ } else {
+ /* rx or rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
}
- /* if this is a tx only vector halve the interrupt rate */
- if (q_vector->txr_count && !q_vector->rxr_count)
- q_vector->eitr = (adapter->eitr_param >> 1);
- else if (q_vector->rxr_count)
- /* rx only */
- q_vector->eitr = adapter->eitr_param;
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= 1 << v_idx;
- ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ ixgbevf_write_eitr(q_vector);
}
ixgbevf_set_ivar(adapter, -1, 1, v_idx);
-
- /* set up to autoclear timer, and the vectors */
- mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~IXGBE_EIMS_OTHER;
- IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+ /* setup eims_other and add value to global eims_enable_mask */
+ adapter->eims_other = 1 << v_idx;
+ adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
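The interrupt masks computed in ixgbevf_configure_msix() above use one bit per MSI-X vector. For the common case of two queue vectors plus the mailbox vector, the values work out as follows (an illustrative walkthrough, not new code):

	vector 0 (queues)   eims_enable_mask |= 1 << 0  -> 0x1
	vector 1 (queues)   eims_enable_mask |= 1 << 1  -> 0x3
	vector 2 (mailbox)  eims_other        = 1 << 2  -> 0x4
	                    eims_enable_mask |= 0x4     -> 0x7

eims_enable_mask is later written to VTEIAM/VTEIAC/VTEIMS by ixgbevf_irq_enable(), while eims_other alone re-arms the mailbox vector at the end of ixgbevf_msix_mbx().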
@@ -765,11 +624,8 @@ enum latency_range {
/**
* ixgbevf_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @eitr: eitr setting (ints per sec) to give last timeslice
- * @itr_setting: current throttle rate in ints/second
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
*
* Stores a new ITR value based on packets and byte
* counts during the last interrupt. The advantage of per interrupt
@@ -779,17 +635,17 @@ enum latency_range {
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
- u32 eitr, u8 itr_setting,
- int packets, int bytes)
+static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring_container *ring_container)
{
- unsigned int retval = itr_setting;
+ int bytes = ring_container->total_bytes;
+ int packets = ring_container->total_packets;
u32 timepassed_us;
u64 bytes_perint;
+ u8 itr_setting = ring_container->itr;
if (packets == 0)
- goto update_itr_done;
-
+ return;
/* simple throttlerate management
* 0-20MB/s lowest (100000 ints/s)
@@ -797,134 +653,77 @@ static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
* 100-1249MB/s bulk (8000 ints/s)
*/
/* what was last interrupt timeslice? */
- timepassed_us = 1000000/eitr;
+ timepassed_us = q_vector->itr >> 2;
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
case lowest_latency:
- if (bytes_perint > adapter->eitr_low)
- retval = low_latency;
+ if (bytes_perint > 10)
+ itr_setting = low_latency;
break;
case low_latency:
- if (bytes_perint > adapter->eitr_high)
- retval = bulk_latency;
- else if (bytes_perint <= adapter->eitr_low)
- retval = lowest_latency;
+ if (bytes_perint > 20)
+ itr_setting = bulk_latency;
+ else if (bytes_perint <= 10)
+ itr_setting = lowest_latency;
break;
case bulk_latency:
- if (bytes_perint <= adapter->eitr_high)
- retval = low_latency;
+ if (bytes_perint <= 20)
+ itr_setting = low_latency;
break;
}
-update_itr_done:
- return retval;
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itr_setting;
}
-/**
- * ixgbevf_write_eitr - write VTEITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
- *
- * This function is made to be called by ethtool and by the driver
- * when it needs to update VTEITR registers at runtime. Hardware
- * specific quirks/differences are taken care of here.
- */
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
- u32 itr_reg)
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
- struct ixgbe_hw *hw = &adapter->hw;
-
- itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
-
- /*
- * set the WDIS bit to not clear the timer bits and cause an
- * immediate assertion of the interrupt
- */
- itr_reg |= IXGBE_EITR_CNT_WDIS;
+ u32 new_itr = q_vector->itr;
+ u8 current_itr;
- IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
-}
+ ixgbevf_update_itr(q_vector, &q_vector->tx);
+ ixgbevf_update_itr(q_vector, &q_vector->rx);
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
-{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- u32 new_itr;
- u8 current_itr, ret_itr;
- int i, r_idx, v_idx = q_vector->v_idx;
- struct ixgbevf_ring *rx_ring, *tx_ring;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
- ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
- q_vector->tx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
- q_vector->rx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
- current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
case lowest_latency:
- new_itr = 100000;
+ new_itr = IXGBE_100K_ITR;
break;
case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
+ new_itr = IXGBE_20K_ITR;
break;
case bulk_latency:
default:
- new_itr = 8000;
+ new_itr = IXGBE_8K_ITR;
break;
}
- if (new_itr != q_vector->eitr) {
- u32 itr_reg;
-
- /* save the algorithm value here, not the smoothed one */
- q_vector->eitr = new_itr;
+ if (new_itr != q_vector->itr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
- itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
- ixgbevf_write_eitr(adapter, v_idx, itr_reg);
+ new_itr = (10 * new_itr * q_vector->itr) /
+ ((9 * new_itr) + q_vector->itr);
+
+ /* save the algorithm value here */
+ q_vector->itr = new_itr;
+
+ ixgbevf_write_eitr(q_vector);
}
}
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
- struct net_device *netdev = data;
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
- u32 eicr;
u32 msg;
bool got_ack = false;
- eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
- IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
-
if (!hw->mbx.ops.check_for_ack(hw))
got_ack = true;
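The smoothing step in ixgbevf_set_itr() above replaces the old 90/10 percentage blend with a weighted form that lowers the throttle interval quickly but raises it only gradually, favoring latency. A worked example with constants from this patch: suppose the vector sits at IXGBE_20K_ITR (itr = 200, i.e. 50us << 2) and traffic shifts to bulk, so new_itr = IXGBE_8K_ITR = 500:

	new_itr = (10 * 500 * 200) / ((9 * 500) + 200)
	        = 1000000 / 4700
	        = 212		(integer math; ~53us, a small step toward 500)

Repeating the update converges on 500 over several interrupts, while the reverse move (500 toward IXGBE_100K_ITR = 40) drops to 232 in a single step.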
@@ -953,75 +752,24 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
if (got_ack)
hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
-{
- struct ixgbevf_q_vector *q_vector = data;
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *tx_ring;
- int i, r_idx;
-
- if (!q_vector->txr_count)
- return IRQ_HANDLED;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
- tx_ring->total_bytes = 0;
- tx_ring->total_packets = 0;
- ixgbevf_clean_tx_irq(adapter, tx_ring);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
-
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
return IRQ_HANDLED;
}
+
/**
- * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rings - MSI-X queue interrupt handler for a vector's rings
* @irq: unused
* @data: pointer to our q_vector struct for this interrupt vector
**/
-static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
struct ixgbevf_q_vector *q_vector = data;
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- int r_idx;
- int i;
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- rx_ring->total_bytes = 0;
- rx_ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
- if (!q_vector->rxr_count)
- return IRQ_HANDLED;
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
- /* disable interrupts on this vector only */
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
-{
- ixgbevf_msix_clean_rx(irq, data);
- ixgbevf_msix_clean_tx(irq, data);
+ /* EIAM disabled interrupts (on this vector) for us */
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -1031,9 +779,9 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- set_bit(r_idx, q_vector->rxr_idx);
- q_vector->rxr_count++;
- a->rx_ring[r_idx].v_idx = 1 << v_idx;
+ a->rx_ring[r_idx].next = q_vector->rx.ring;
+ q_vector->rx.ring = &a->rx_ring[r_idx];
+ q_vector->rx.count++;
}
static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
@@ -1041,9 +789,9 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- set_bit(t_idx, q_vector->txr_idx);
- q_vector->txr_count++;
- a->tx_ring[t_idx].v_idx = 1 << v_idx;
+ a->tx_ring[t_idx].next = q_vector->tx.ring;
+ q_vector->tx.ring = &a->tx_ring[t_idx];
+ q_vector->tx.count++;
}
/**
@@ -1119,37 +867,30 @@ out:
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- irqreturn_t (*handler)(int, void *);
- int i, vector, q_vectors, err;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int vector, err;
int ri = 0, ti = 0;
- /* Decrement for Other and TCP Timer vectors */
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
- ? &ixgbevf_msix_clean_many : \
- (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
- (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
- NULL)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(adapter->q_vector[vector]);
-
- if (handler == &ixgbevf_msix_clean_rx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "rx", ri++);
- } else if (handler == &ixgbevf_msix_clean_tx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "tx", ti++);
- } else if (handler == &ixgbevf_msix_clean_many) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "TxRx", vector);
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "TxRx", ri++);
+ ti++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "rx", ri++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "tx", ti++);
} else {
/* skip this unused q_vector */
continue;
}
- err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
+ q_vector->name, q_vector);
if (err) {
hw_dbg(&adapter->hw,
"request_irq failed for MSIX interrupt "
@@ -1158,9 +899,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
}
}
- sprintf(adapter->name[vector], "%s:mbx", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ &ixgbevf_msix_mbx, 0, netdev->name, adapter);
if (err) {
hw_dbg(&adapter->hw,
"request_irq for msix_mbx failed: %d\n", err);
@@ -1170,9 +910,11 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
return 0;
free_queue_irqs:
- for (i = vector - 1; i >= 0; i--)
- free_irq(adapter->msix_entries[--vector].vector,
- &(adapter->q_vector[i]));
+ while (vector) {
+ vector--;
+ free_irq(adapter->msix_entries[vector].vector,
+ adapter->q_vector[vector]);
+ }
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -1185,11 +927,10 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
for (i = 0; i < q_vectors; i++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
- bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
- bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
- q_vector->rxr_count = 0;
- q_vector->txr_count = 0;
- q_vector->eitr = adapter->eitr_param;
+ q_vector->rx.ring = NULL;
+ q_vector->tx.ring = NULL;
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
}
}
@@ -1215,17 +956,20 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
int i, q_vectors;
q_vectors = adapter->num_msix_vectors;
-
i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, netdev);
+ free_irq(adapter->msix_entries[i].vector, adapter);
i--;
for (; i >= 0; i--) {
+ /* free only the irqs that were actually requested */
+ if (!adapter->q_vector[i]->rx.ring &&
+ !adapter->q_vector[i]->tx.ring)
+ continue;
+
free_irq(adapter->msix_entries[i].vector,
adapter->q_vector[i]);
}
@@ -1239,10 +983,12 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
**/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
- int i;
struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
IXGBE_WRITE_FLUSH(hw);
@@ -1254,23 +1000,13 @@ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
* ixgbevf_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
- bool queues, bool flush)
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 mask;
- u64 qmask;
- mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
- qmask = ~0;
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-
- if (queues)
- ixgbevf_irq_enable_queues(adapter, qmask);
-
- if (flush)
- IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
@@ -1320,29 +1056,14 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl = IXGBE_SRRCTL_DROP_EN;
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- u16 bufsz = IXGBEVF_RXBUFFER_2048;
- /* grow the amount we can receive on large page machines */
- if (bufsz < (PAGE_SIZE / 2))
- bufsz = (PAGE_SIZE / 2);
- /* cap the bufsz at our largest descriptor size */
- bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
-
- srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- } else {
- srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
- srrctl |= IXGBEVF_RXBUFFER_2048 >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= rx_ring->rx_buf_len >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- }
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
@@ -1362,36 +1083,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
u32 rdlen;
int rx_buf_len;
- /* Decide whether to use packet split mode or not */
- if (netdev->mtu > ETH_DATA_LEN) {
- if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- } else {
- if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- else
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
- }
-
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- /* PSRTYPE must be initialized in 82599 */
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR |
- IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
- rx_buf_len = IXGBEVF_RX_HDR_SIZE;
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
- if (netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
- }
+ /* PSRTYPE must be initialized in 82599 */
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1418,9 +1115,14 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* add VID to filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
+
+ spin_unlock(&adapter->mbx_lock);
+
set_bit(vid, adapter->active_vlans);
return 0;
@@ -1431,9 +1133,14 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* remove VID from filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(vid, adapter->active_vlans);
return 0;
@@ -1488,11 +1195,15 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* reprogram multicast list */
if (hw->mac.ops.update_mc_addr_list)
hw->mac.ops.update_mc_addr_list(hw, netdev);
ixgbevf_write_uc_addr_list(netdev);
+
+ spin_unlock(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1502,15 +1213,8 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- struct napi_struct *napi;
q_vector = adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
- napi = &q_vector->napi;
- if (q_vector->rxr_count > 1)
- napi->poll = &ixgbevf_clean_rxonly_many;
-
- napi_enable(napi);
+ napi_enable(&q_vector->napi);
}
}
@@ -1522,8 +1226,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
napi_disable(&q_vector->napi);
}
}
@@ -1541,9 +1243,8 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
ixgbevf_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbevf_ring *ring = &adapter->rx_ring[i];
- ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
- ring->next_to_use = ring->count - 1;
- writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
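+ /* post receive buffers for every currently unused descriptor */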
+ ixgbevf_alloc_rx_buffers(adapter, ring,
+ IXGBE_DESC_UNUSED(ring));
}
}
@@ -1647,6 +1348,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_configure_msix(adapter);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar) {
if (is_valid_ether_addr(hw->mac.addr))
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
@@ -1658,6 +1361,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mbx.ops.write_posted(hw, msg, 2);
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1667,10 +1372,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_save_reset_stats(adapter);
ixgbevf_init_last_counter_stats(adapter);
- /* bring the link up in the watchdog, this could race with our first
- * link up interrupt but shouldn't be a problem */
- adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
- adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
}
@@ -1685,7 +1386,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_VTEICR);
- ixgbevf_irq_enable(adapter, true, true);
+ ixgbevf_irq_enable(adapter);
}
/**
@@ -1723,14 +1424,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
dev_kfree_skb(this);
} while (skb);
}
- if (!rx_buffer_info->page)
- continue;
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
- put_page(rx_buffer_info->page);
- rx_buffer_info->page = NULL;
- rx_buffer_info->page_offset = 0;
}
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -1767,7 +1460,7 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -1882,11 +1575,15 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.reset_hw(hw))
hw_dbg(hw, "PF still resetting\n");
else
hw->mac.ops.init_hw(hw);
+ spin_unlock(&adapter->mbx_lock);
+
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
netdev->addr_len);
@@ -1900,10 +1597,9 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
{
int err, vector_threshold;
- /* We'll want at least 3 (vector_threshold):
- * 1) TxQ[0] Cleanup
- * 2) RxQ[0] Cleanup
- * 3) Other (Link Status Change, etc.)
+ /* We'll want at least 2 (vector_threshold):
+ * 1) TxQ[0] + RxQ[0] handler
+ * 2) Other (Link Status Change, etc.)
*/
vector_threshold = MIN_MSIX_COUNT;
@@ -1942,8 +1638,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
}
}
-/*
- * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -1958,8 +1654,6 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
- adapter->num_rx_pools = adapter->num_rx_queues;
- adapter->num_rx_queues_per_pool = 1;
}
/**
@@ -1988,12 +1682,16 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
adapter->tx_ring[i].count = adapter->tx_ring_count;
adapter->tx_ring[i].queue_index = i;
adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i].dev = &adapter->pdev->dev;
+ adapter->tx_ring[i].netdev = adapter->netdev;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i].count = adapter->rx_ring_count;
adapter->rx_ring[i].queue_index = i;
adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i].dev = &adapter->pdev->dev;
+ adapter->rx_ring[i].netdev = adapter->netdev;
}
return 0;
@@ -2020,10 +1718,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
- * (roughly) twice the number of vectors as there are CPU's.
+ * (roughly) the same number of vectors as there are CPUs.
+ * The default is to use pairs of vectors.
*/
- v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ v_budget = min_t(int, v_budget, num_online_cpus());
+ v_budget += NON_Q_VECTORS;
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
@@ -2054,12 +1754,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
int q_idx, num_q_vectors;
struct ixgbevf_q_vector *q_vector;
- int napi_vectors;
- int (*poll)(struct napi_struct *, int);
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- napi_vectors = adapter->num_rx_queues;
- poll = &ixgbevf_clean_rxonly;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
@@ -2067,10 +1763,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
goto err_out;
q_vector->adapter = adapter;
q_vector->v_idx = q_idx;
- q_vector->eitr = adapter->eitr_param;
- if (q_idx < napi_vectors)
- netif_napi_add(adapter->netdev, &q_vector->napi,
- (*poll), 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ ixgbevf_poll, 64);
adapter->q_vector[q_idx] = q_vector;
}
@@ -2216,21 +1910,17 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->netdev->addr_len);
}
- /* Enable dynamic interrupt throttling rates */
- adapter->eitr_param = 20000;
- adapter->itr_setting = 1;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&adapter->mbx_lock);
- /* set defaults for eitr in MegaBytes */
- adapter->eitr_low = 10;
- adapter->eitr_high = 20;
+ /* Enable dynamic interrupt throttling rates */
+ adapter->rx_itr_setting = 1;
+ adapter->tx_itr_setting = 1;
/* set default ring sizes */
adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
- /* enable rx csum by default */
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
@@ -2290,7 +1980,7 @@ static void ixgbevf_watchdog(unsigned long data)
{
struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
struct ixgbe_hw *hw = &adapter->hw;
- u64 eics = 0;
+ u32 eics = 0;
int i;
/*
@@ -2304,11 +1994,11 @@ static void ixgbevf_watchdog(unsigned long data)
/* get one bit for every active tx/rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
- if (qv->rxr_count || qv->txr_count)
- eics |= (1 << i);
+ if (qv->rx.ring || qv->tx.ring)
+ eics |= 1 << i;
}
- IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
watchdog_short_circuit:
schedule_work(&adapter->watchdog_task);
@@ -2362,8 +2052,16 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
* no LSC interrupt
*/
if (hw->mac.ops.check_link) {
- if ((hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false)) != 0) {
+ s32 need_reset;
+
+ spin_lock(&adapter->mbx_lock);
+
+ need_reset = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
+ if (need_reset) {
adapter->link_up = link_up;
adapter->link_speed = link_speed;
netif_carrier_off(netdev);
@@ -2478,7 +2176,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->work_limit = tx_ring->count;
return 0;
err:
@@ -2682,7 +2379,7 @@ static int ixgbevf_open(struct net_device *netdev)
if (err)
goto err_req_irq;
- ixgbevf_irq_enable(adapter, true, true);
+ ixgbevf_irq_enable(adapter);
return 0;
@@ -2724,172 +2421,153 @@ static int ixgbevf_close(struct net_device *netdev)
return 0;
}
-static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl;
- u32 mss_l4len_idx, l4len;
+ u16 i = tx_ring->next_to_use;
- if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- adapter->hw_tso_ctxt++;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- adapter->hw_tso6_ctxt++;
- }
+ context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
- i = tx_ring->next_to_use;
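+ /* advance next_to_use, wrapping back to the start of the ring */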
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= ((skb_network_offset(skb)) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- vlan_macip_lens |=
- (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len +=
- (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->protocol == htons(ETH_P_IP))
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
- /* MSS L4LEN IDX */
- mss_l4len_idx =
- (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
- /* use index 1 for TSO */
- mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
- return true;
+static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
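+ /* seed tcp->check with the pseudo-header checksum (sans length)
+  * and zero the IP length fields; hardware then finalizes the
+  * checksums for each segment */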
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
}
- return false;
+ /* compute header lengths */
+ l4len = tcp_hdrlen(skb);
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* mss_l4len_idx: use 1 as the context index for TSO */
+ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return 1;
}
-static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
+static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
- struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
- i = tx_ring->next_to_use;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |= (tx_flags &
- IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= (skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vlan_macip_lens |= (skb_transport_header(skb) -
- skb_network_header(skb));
-
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- case __constant_htons(ETH_P_IPV6):
- /* XXX what about other V6 headers?? */
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- pr_warn("partial checksum but "
- "proto=%x!\n", skb->protocol);
- }
- break;
- }
- }
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
- /* use index zero for tx checksum offload */
- context_desc->mss_l4len_idx = 0;
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
- adapter->hw_csum_tx_good++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4_hdr = 0;
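+ /* capture the L4 protocol so the switch below can encode the
+  * matching L4 type and header length in the descriptor */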
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
- return true;
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
+ }
+ break;
+ }
}
- return false;
+ /* vlan_macip_lens: MACLEN, VLAN tag */
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return (skb->ip_summed == CHECKSUM_PARTIAL);
}
-static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
+static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
unsigned int first)
{
- struct pci_dev *pdev = adapter->pdev;
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
@@ -2908,12 +2586,11 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+ tx_buffer_info->dma = dma_map_single(tx_ring->dev,
skb->data + offset,
size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2938,12 +2615,12 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->dma =
- skb_frag_dma_map(&adapter->pdev->dev, frag,
+ skb_frag_dma_map(tx_ring->dev, frag,
offset, size, DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(tx_ring->dev,
+ tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2964,15 +2641,15 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
i = i - 1;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
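+ /* record when this packet was handed to the hardware */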
+ tx_ring->tx_buffer_info[first].time_stamp = jiffies;
return count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
/* clear timestamp and dma mappings for failed tx_buffer_info map */
tx_buffer_info->dma = 0;
- tx_buffer_info->time_stamp = 0;
tx_buffer_info->next_to_watch = 0;
count--;
@@ -2983,14 +2660,13 @@ dma_error:
if (i < 0)
i += tx_ring->count;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
return count;
}
-static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring, int tx_flags,
+static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -3007,28 +2683,31 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+
if (tx_flags & IXGBE_TX_FLAGS_TSO) {
cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
-
/* use index 1 context for tso */
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
+ }
- } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ /*
+ * Check Context must be set if Tx switch is enabled, which it
+ * always is when virtual functions are running
+ */
+ olinfo_status |= IXGBE_ADVTXD_CC;
olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
i = tx_ring->next_to_use;
while (count--) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
tx_desc->read.cmd_type_len =
cpu_to_le32(cmd_type_len | tx_buffer_info->length);
@@ -3040,24 +2719,14 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
-
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
-static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
- struct ixgbevf_ring *tx_ring, int size)
+static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */
@@ -3069,17 +2738,16 @@ static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(netdev, tx_ring->queue_index);
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
++adapter->restart_queue;
return 0;
}
-static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
- struct ixgbevf_ring *tx_ring, int size)
+static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
- return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+ return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -3090,54 +2758,66 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_flags = 0;
u8 hdr_len = 0;
int r_idx = 0, tso;
- int count = 0;
-
- unsigned int f;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
tx_ring = &adapter->tx_ring[r_idx];
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- /* four things can cause us to need a context descriptor */
- if (skb_is_gso(skb) ||
- (skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN))
- count++;
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
-
- if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
- adapter->tx_busy++;
- return NETDEV_TX_BUSY;
- }
-
first = tx_ring->next_to_use;
if (skb->protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
+ else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
- ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
- ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ ixgbevf_tx_queue(tx_ring, tx_flags,
+ ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
skb->len, hdr_len);
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
- ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+ ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
}
@@ -3161,9 +2841,13 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar)
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ spin_unlock(&adapter->mbx_lock);
+
return 0;
}
@@ -3220,9 +2904,7 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
ixgbevf_free_all_rx_resources(adapter);
}
-#ifdef CONFIG_PM
pci_save_state(pdev);
-#endif
pci_disable_device(pdev);
}
@@ -3265,19 +2947,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
-static int ixgbevf_set_features(struct net_device *netdev,
- netdev_features_t features)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- if (features & NETIF_F_RXCSUM)
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
- return 0;
-}
-
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
@@ -3290,7 +2959,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
- .ndo_set_features = ixgbevf_set_features,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3350,12 +3018,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
pci_set_master(pdev);
-#ifdef HAVE_TX_MQ
netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
MAX_TX_QUEUES);
-#else
- netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
-#endif
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -3396,10 +3060,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
- adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
-
/* setup the private structure */
err = ixgbevf_sw_init(adapter);
if (err)
@@ -3458,8 +3118,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
goto err_register;
- adapter->netdev_registered = true;
-
netif_carrier_off(netdev);
ixgbevf_init_last_counter_stats(adapter);
@@ -3469,8 +3127,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "LRO is disabled\n");
-
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
return 0;
@@ -3510,10 +3166,8 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
- if (adapter->netdev_registered) {
+ if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- adapter->netdev_registered = false;
- }
ixgbevf_reset_interrupt_capability(adapter);
@@ -3530,12 +3184,92 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+/**
+ * ixgbevf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
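+ /* a permanent channel failure can't be fixed by a slot reset,
+  * so ask the ERS core to disconnect the device */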
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbevf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the ixgbevf_resume routine.
+ */
+static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+
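+ /* reset the VF and re-run hardware init via the PF mailbox */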
+ ixgbevf_reset(adapter);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgbevf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the ixgbevf_resume routine.
+ */
+static void ixgbevf_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+ netif_device_attach(netdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers ixgbevf_err_handler = {
+ .error_detected = ixgbevf_io_error_detected,
+ .slot_reset = ixgbevf_io_slot_reset,
+ .resume = ixgbevf_io_resume,
+};
+
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = __devexit_p(ixgbevf_remove),
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler,
};
/**