Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/mlx4_en.h')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h  |  136
1 file changed, 84 insertions(+), 52 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 35fb60e2320..d72a5a894fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -45,6 +45,7 @@
#include <linux/dcbnl.h>
#endif
#include <linux/cpu_rmap.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -56,8 +57,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "2.0"
-#define DRV_RELDATE "Dec 2011"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb 2014"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -125,6 +126,8 @@ enum {
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
MLX4_EN_NUM_UP)
+#define MLX4_EN_DEFAULT_TX_WORK 256
+
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
#define MLX4_EN_RX_COAL_TIME 0x10
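Note: MLX4_EN_DEFAULT_TX_WORK pairs with the new tx_work_limit field and the mlx4_en_poll_tx_cq() prototype added further down; presumably it is the default cap on how many TX completions one NAPI poll may reap, separate from the RX budget. A one-line sketch of the assumed wiring at netdev init:

	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;	/* sketch; consumed by the TX poll path */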
@@ -186,6 +189,13 @@ enum {
#define GET_AVG_PERF_COUNTER(cnt) (0)
#endif /* MLX4_EN_PERF_STAT */
+/* Constants for TX flow */
+enum {
+ MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+ MAX_BF = 256,
+ MIN_PKT_LEN = 17,
+};
+
/*
* Configurables
*/
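Note: the MAX_INLINE comment only hints at the arithmetic. A plausible reading, assuming a 128-byte TX basic block that must hold a 16-byte send control segment plus two 4-byte inline-segment headers (inline data crossing the 64-byte boundary would be split into two segments):

	/* hypothetical names, for illustration only */
	#define EX_TXBB_SIZE		128	/* one TX basic block */
	#define EX_CTRL_SEG_SIZE	 16	/* send control segment */
	#define EX_INLINE_HDR_SIZE	  4	/* header per inline segment */
	_Static_assert(EX_TXBB_SIZE - EX_CTRL_SEG_SIZE -
		       2 * EX_INLINE_HDR_SIZE == 104,
		       "matches MAX_INLINE = 104 above");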
@@ -237,8 +247,8 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
dma_addr_t dma;
- u32 offset;
- u32 size;
+ u32 page_offset;
+ u32 page_size;
};
struct mlx4_en_tx_ring {
@@ -255,6 +265,8 @@ struct mlx4_en_tx_ring {
u16 poll_cnt;
struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf;
+ u8 queue_index;
+ cpumask_t affinity_mask;
u32 last_nr_txbb;
struct mlx4_qp qp;
struct mlx4_qp_context context;
@@ -264,10 +276,13 @@ struct mlx4_en_tx_ring {
unsigned long bytes;
unsigned long packets;
unsigned long tx_csum;
+ unsigned long queue_stopped;
+ unsigned long wake_queue;
struct mlx4_bf bf;
bool bf_enabled;
struct netdev_queue *tx_queue;
int hwtstamp_tx_type;
+ int inline_thold;
};
struct mlx4_en_rx_desc {
@@ -292,7 +307,7 @@ struct mlx4_en_rx_ring {
void *rx_info;
unsigned long bytes;
unsigned long packets;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned long yields;
unsigned long misses;
unsigned long cleaned;
@@ -300,13 +315,13 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
int hwtstamp_rx_filter;
+ cpumask_var_t affinity_mask;
};
struct mlx4_en_cq {
struct mlx4_cq mcq;
struct mlx4_hwq_resources wqres;
int ring;
- spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
int size;
@@ -318,7 +333,7 @@ struct mlx4_en_cq {
struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE 0
#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
@@ -329,7 +344,8 @@ struct mlx4_en_cq {
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+ struct irq_desc *irq_desc;
};
struct mlx4_en_port_profile {
@@ -343,6 +359,7 @@ struct mlx4_en_port_profile {
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
+ int inline_thold;
};
struct mlx4_en_profile {
@@ -373,10 +390,14 @@ struct mlx4_en_dev {
u32 priv_pdn;
spinlock_t uar_lock;
u8 mac_removed[MLX4_MAX_PORTS + 1];
+ rwlock_t clock_lock;
+ u32 nominal_c_mult;
struct cyclecounter cycles;
struct timecounter clock;
unsigned long last_overflow_check;
unsigned long overflow_period;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
};
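Note: clock_lock presumably serializes the timecounter against the new PTP callbacks and the periodic overflow check. timecounter_read() advances the counter's internal state, so even read-only users would need the write side of the rwlock; a minimal sketch (example_read_ns() is an invented helper, not driver code):

	static u64 example_read_ns(struct mlx4_en_dev *mdev)
	{
		unsigned long flags;
		u64 ns;

		write_lock_irqsave(&mdev->clock_lock, flags);
		ns = timecounter_read(&mdev->clock);	/* updates clock.nsec */
		write_unlock_irqrestore(&mdev->clock_lock, flags);
		return ns;
	}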
@@ -434,6 +455,7 @@ struct mlx4_en_mc_list {
enum mlx4_en_mclist_act action;
u8 addr[ETH_ALEN];
u64 reg_id;
+ u64 tunnel_reg_id;
};
struct mlx4_en_frag_info {
@@ -523,6 +545,7 @@ struct mlx4_en_priv {
__be32 ctrl_flags;
u32 flags;
u8 num_tx_rings_p_up;
+ u32 tx_work_limit;
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_skb_size;
@@ -530,16 +553,20 @@ struct mlx4_en_priv {
u16 num_frags;
u16 log_rx_info;
- struct mlx4_en_tx_ring *tx_ring;
- struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
- struct mlx4_en_cq *tx_cq;
- struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct mlx4_en_tx_ring **tx_ring;
+ struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
+ struct mlx4_en_cq **tx_cq;
+ struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
+#ifdef CONFIG_MLX4_EN_VXLAN
+ struct work_struct vxlan_add_task;
+ struct work_struct vxlan_del_task;
+#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
@@ -565,7 +592,8 @@ struct mlx4_en_priv {
struct list_head filters;
struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
-
+ u64 tunnel_reg_id;
+ __be16 vxlan_port;
};
enum mlx4_en_wol {
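Note: the vxlan_add_task/vxlan_del_task pair plus the vxlan_port field suggest the VXLAN ndo callbacks run in atomic context and defer the firmware update to the driver workqueue. A sketch of that assumed pattern (function name invented; mdev->workqueue assumed):

	static void example_add_vxlan_port(struct net_device *dev,
					   sa_family_t sa_family, __be16 port)
	{
		struct mlx4_en_priv *priv = netdev_priv(dev);

		priv->vxlan_port = port;	/* remember the UDP port */
		queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
	}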
@@ -580,7 +608,7 @@ struct mlx4_mac_entry {
struct rcu_head rcu;
};
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
spin_lock_init(&cq->poll_lock);
@@ -626,7 +654,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
if ((cq->state & MLX4_CQ_LOCKED)) {
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
rc = false;
@@ -653,7 +681,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
}
/* true if a socket is polling, even if it did not get the lock */
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
return cq->state & CQ_USER_PEND;
@@ -683,11 +711,11 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
return false;
}
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
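Note: the busy-poll lock above pairs with a NAPI-side lock; a sketch of the intended calling pattern, assuming the matching mlx4_en_cq_lock_napi()/mlx4_en_cq_unlock_napi() helpers elsewhere in this header:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
		struct net_device *dev = cq->dev;
		int done;

		if (!mlx4_en_cq_lock_napi(cq))
			return budget;	/* a socket is busy-polling this CQ */
		done = mlx4_en_process_rx_cq(dev, cq, budget);
		mlx4_en_cq_unlock_napi(cq);
		if (done < budget) {	/* ring drained: re-enable interrupts */
			napi_complete(napi);
			mlx4_en_arm_cq(netdev_priv(dev), cq);
		}
		return done;
	}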
@@ -704,9 +732,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
-int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
- int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
+ int entries, int ring, enum cq_type mode, int node);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -714,23 +742,27 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
- int qpn, u32 size, u16 stride);
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring **pring,
+ int qpn, u32 size, u16 stride,
+ int node, int queue_index);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
-
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
- u32 size, u16 stride);
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
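Note: the switch from passing a ring struct to a **pring out-parameter plus a NUMA node implies the rings are now allocated by the create functions themselves, preferably on the IRQ's node. A sketch of the assumed pattern inside mlx4_en_create_tx_ring():

	struct mlx4_en_tx_ring *ring;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		/* fall back to any node rather than failing outright */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			return -ENOMEM;
	}
	*pring = ring;	/* hand the ring back through the out-parameter */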
@@ -739,6 +771,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
struct mlx4_en_cq *cq,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
@@ -768,13 +801,11 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
int mlx4_en_setup_tc(struct net_device *dev, u8 up);
#ifdef CONFIG_RFS_ACCEL
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *rx_ring);
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#endif
#define MLX4_EN_NUM_SELF_TEST 5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
-u64 mlx4_en_mac_to_u64(u8 *addr);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
/*
@@ -785,6 +816,7 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
int mlx4_en_timestamp_config(struct net_device *dev,
int tx_type,
int rx_filter);
@@ -803,26 +835,26 @@ __printf(3, 4)
int en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...);
-#define en_dbg(mlevel, priv, format, arg...) \
-do { \
- if (NETIF_MSG_##mlevel & priv->msg_enable) \
- en_print(KERN_DEBUG, priv, format, ##arg); \
+#define en_dbg(mlevel, priv, format, ...) \
+do { \
+ if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
+ en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
} while (0)
-#define en_warn(priv, format, arg...) \
- en_print(KERN_WARNING, priv, format, ##arg)
-#define en_err(priv, format, arg...) \
- en_print(KERN_ERR, priv, format, ##arg)
-#define en_info(priv, format, arg...) \
- en_print(KERN_INFO, priv, format, ## arg)
-
-#define mlx4_err(mdev, format, arg...) \
- pr_err("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_info(mdev, format, arg...) \
- pr_info("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_warn(mdev, format, arg...) \
- pr_warning("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
+#define en_warn(priv, format, ...) \
+ en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
+#define en_err(priv, format, ...) \
+ en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
+#define en_info(priv, format, ...) \
+ en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
+
+#define mlx4_err(mdev, format, ...) \
+ pr_err(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...) \
+ pr_info(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...) \
+ pr_warn(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#endif
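Note: the ##__VA_ARGS__ conversion keeps zero-argument invocations legal, just like the old GNU named-varargs form; illustrative calls (message text invented):

	en_dbg(DRV, priv, "using %d TX rings\n", priv->tx_ring_num);
	en_err(priv, "failed to allocate resources\n");	/* no varargs: still compiles */
	mlx4_warn(mdev, "port %d: link down\n", port);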